Diffstat (limited to 'kernel/time')
-rw-r--r-- | kernel/time/hrtimer.c | 346
-rw-r--r-- | kernel/time/itimer.c | 1
-rw-r--r-- | kernel/time/jiffies.c | 7
-rw-r--r-- | kernel/time/ntp.c | 43
-rw-r--r-- | kernel/time/posix-cpu-timers.c | 198
-rw-r--r-- | kernel/time/posix-timers.c | 37
-rw-r--r-- | kernel/time/tick-broadcast-hrtimer.c | 1
-rw-r--r-- | kernel/time/tick-common.c | 10
-rw-r--r-- | kernel/time/tick-sched.c | 35
-rw-r--r-- | kernel/time/timekeeping.c | 6
-rw-r--r-- | kernel/time/timekeeping.h | 3
-rw-r--r-- | kernel/time/timer.c | 95
12 files changed, 706 insertions, 76 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 93ef7190bdea..2c6be169bdc7 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -48,11 +48,13 @@
 #include <linux/sched/rt.h>
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
+#include <linux/kthread.h>
 #include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
 #include <trace/events/timer.h>
+#include <trace/events/hist.h>
 
 #include "tick-internal.h"
 
@@ -576,8 +578,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
-	 * reprogramming is handled either by the softirq, which called the
-	 * callback or at the end of the hrtimer_interrupt.
+	 * reprogramming is handled at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;
@@ -621,6 +622,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
	return res;
 }
 
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
+static int hrtimer_rt_defer(struct hrtimer *timer);
+
 /*
  * Initialize the high resolution related parts of cpu_base
  */
@@ -630,6 +634,21 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
	base->hres_active = 0;
 }
 
+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
+{
+	if (!hrtimer_reprogram(timer, base))
+		return 0;
+	if (!wakeup)
+		return -ETIME;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	if (!hrtimer_rt_defer(timer))
+		return -ETIME;
+#endif
+	return 1;
+}
+
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 {
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
@@ -695,6 +714,44 @@ static void clock_was_set_work(struct work_struct *work)
 }
 
 static DECLARE_WORK(hrtimer_work, clock_was_set_work);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * RT can not call schedule_work from real interrupt context.
+ * Need to make a thread to do the real work.
+ */
+static struct task_struct *clock_set_delay_thread;
+static bool do_clock_set_delay;
+
+static int run_clock_set_delay(void *ignore)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (do_clock_set_delay) {
+			do_clock_set_delay = false;
+			schedule_work(&hrtimer_work);
+		}
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+void clock_was_set_delayed(void)
+{
+	do_clock_set_delay = true;
+	/* Make visible before waking up process */
+	smp_wmb();
+	wake_up_process(clock_set_delay_thread);
+}
+
+static __init int create_clock_set_delay_thread(void)
+{
+	clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
+	BUG_ON(!clock_set_delay_thread);
+	return 0;
+}
+early_initcall(create_clock_set_delay_thread);
+#else /* PREEMPT_RT_FULL */
 /*
  * Called from timekeeping and resume code to reprogramm the hrtimer
  * interrupt device on all cpus.
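
The PREEMPT_RT_FULL block above cannot call schedule_work() directly from hard interrupt context, because on RT the workqueue paths take sleeping locks; it therefore only sets a flag and wakes a dedicated kthread, which calls schedule_work() from process context. Below is a rough userspace analogue of that hand-off, using a pthread condition variable where the kernel uses the flag/smp_wmb()/wake_up_process() sequence; all names are illustrative and not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/* Stand-in for schedule_work(): anything too heavy for irq context. */
static void do_deferred_work(void)
{
	printf("deferred work runs in thread context\n");
}

/* Analogue of run_clock_set_delay(): sleep until kicked, then work. */
static void *helper(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!work_pending)
			pthread_cond_wait(&cond, &lock);
		work_pending = false;
		pthread_mutex_unlock(&lock);
		do_deferred_work();
		pthread_mutex_lock(&lock);
	}
	return NULL;
}

/* Analogue of clock_was_set_delayed(): only flags and wakes. */
static void kick(void)
{
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, helper, NULL);
	kick();
	sleep(1);	/* give the helper a chance to run */
	return 0;
}
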
@@ -703,6 +760,7 @@ void clock_was_set_delayed(void)
 {
	schedule_work(&hrtimer_work);
 }
+#endif
 
 #else
 
@@ -711,6 +769,13 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
+{
+	return 0;
+}
+
 static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
 {
@@ -718,7 +783,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 static inline void retrigger_next_event(void *arg) { }
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -836,6 +900,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 }
 EXPORT_SYMBOL_GPL(hrtimer_forward);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b)	wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer:	timer to wait for
+ *
+ * The function waits in case the timers callback function is
+ * currently executed on the waitqueue of the timer base. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+	struct hrtimer_clock_base *base = timer->base;
+
+	if (base && base->cpu_base && !timer->irqsafe)
+		wait_event(base->cpu_base->wait,
+			   !(timer->state & HRTIMER_STATE_CALLBACK));
+}
+
+#else
+# define wake_up_timer_waiters(b)	do { } while (0)
+#endif
+
 /*
  * enqueue_hrtimer - internal function to (re)start a timer
  *
@@ -879,6 +969,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;
 
+	if (unlikely(!list_empty(&timer->cb_entry))) {
+		list_del_init(&timer->cb_entry);
+		goto out;
+	}
+
	next_timer = timerqueue_getnext(&base->active);
	timerqueue_del(&base->active, &timer->node);
	if (&timer->node == next_timer) {
@@ -966,7 +1061,16 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 
	timer_stats_hrtimer_set_start_info(timer);
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+	{
+		ktime_t now = new_base->get_time();
+		if (ktime_to_ns(tim) < ktime_to_ns(now))
+			timer->praecox = now;
+		else
+			timer->praecox = ktime_set(0, 0);
+	}
+#endif
	leftmost = enqueue_hrtimer(timer, new_base);
	if (!leftmost) {
@@ -980,15 +1084,26 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		 * on dynticks target.
		 */
		wake_up_nohz_cpu(new_base->cpu_base->cpu);
-	} else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
-			hrtimer_reprogram(timer, new_base)) {
+	} else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases)) {
+
+		ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+		if (ret < 0) {
+			/*
+			 * In case we failed to reprogram the timer (mostly
+			 * because out current timer is already elapsed),
+			 * remove it again and report a failure. This avoids
+			 * stale base->first entries.
+			 */
+			debug_deactivate(timer);
+			__remove_hrtimer(timer, new_base,
+					 timer->state & HRTIMER_STATE_CALLBACK, 0);
+		} else if (ret > 0) {
		/*
		 * Only allow reprogramming if the new base is on this CPU.
		 * (it might still be on another CPU if the timer was pending)
		 *
		 * XXX send_remote_softirq() ?
		 */
-		if (wakeup) {
			/*
			 * We need to drop cpu_base->lock to avoid a
			 * lock ordering issue vs. rq->lock.
			 */
@@ -996,9 +1111,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			raw_spin_unlock(&new_base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			local_irq_restore(flags);
-			return ret;
-		} else {
-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			return 0;
		}
	}
 
@@ -1089,7 +1202,7 @@ int hrtimer_cancel(struct hrtimer *timer)
 
		if (ret >= 0)
			return ret;
-		cpu_relax();
+		hrtimer_wait_for_timer(timer);
	}
 }
 EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1153,6 +1266,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
+	INIT_LIST_HEAD(&timer->cb_entry);
	timerqueue_init(&timer->node);
 
 #ifdef CONFIG_TIMER_STATS
@@ -1236,6 +1350,126 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
	timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
 
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+				 struct hrtimer_clock_base *base)
+{
+	/*
+	 * Note, we clear the callback flag before we requeue the
+	 * timer otherwise we trigger the callback_running() check
+	 * in hrtimer_reprogram().
+	 */
+	timer->state &= ~HRTIMER_STATE_CALLBACK;
+
+	if (restart != HRTIMER_NORESTART) {
+		BUG_ON(hrtimer_active(timer));
+		/*
+		 * Enqueue the timer, if it's the leftmost timer then
+		 * we need to reprogram it.
+		 */
+		if (!enqueue_hrtimer(timer, base))
+			return;
+
+#ifndef CONFIG_HIGH_RES_TIMERS
+	}
+#else
+		if (base->cpu_base->hres_active &&
+		    hrtimer_reprogram(timer, base))
+			goto requeue;
+
+	} else if (hrtimer_active(timer)) {
+		/*
+		 * If the timer was rearmed on another CPU, reprogram
+		 * the event device.
+		 */
+		if (&timer->node == base->active.next &&
+		    base->cpu_base->hres_active &&
+		    hrtimer_reprogram(timer, base))
+			goto requeue;
+	}
+	return;
+
+requeue:
+	/*
+	 * Timer is expired. Thus move it from tree to pending list
+	 * again.
+	 */
+	__remove_hrtimer(timer, base, timer->state, 0);
+	list_add_tail(&timer->cb_entry, &base->expired);
+#endif
+}
+
+/*
+ * The changes in mainline which removed the callback modes from
+ * hrtimer are not yet working with -rt. The non wakeup_process()
+ * based callbacks which involve sleeping locks need to be treated
+ * seperately.
+ */
+static void hrtimer_rt_run_pending(void)
+{
+	enum hrtimer_restart (*fn)(struct hrtimer *);
+	struct hrtimer_cpu_base *cpu_base;
+	struct hrtimer_clock_base *base;
+	struct hrtimer *timer;
+	int index, restart;
+
+	local_irq_disable();
+	cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
+
+	raw_spin_lock(&cpu_base->lock);
+
+	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+		base = &cpu_base->clock_base[index];
+
+		while (!list_empty(&base->expired)) {
+			timer = list_first_entry(&base->expired,
+						 struct hrtimer, cb_entry);
+
+			/*
+			 * Same as the above __run_hrtimer function
+			 * just we run with interrupts enabled.
+			 */
+			debug_hrtimer_deactivate(timer);
+			__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+			timer_stats_account_hrtimer(timer);
+			fn = timer->function;
+
+			raw_spin_unlock_irq(&cpu_base->lock);
+			restart = fn(timer);
+			raw_spin_lock_irq(&cpu_base->lock);
+
+			hrtimer_rt_reprogram(restart, timer, base);
+		}
+	}
+
+	raw_spin_unlock_irq(&cpu_base->lock);
+
+	wake_up_timer_waiters(cpu_base);
+}
+
+static int hrtimer_rt_defer(struct hrtimer *timer)
+{
+	if (timer->irqsafe)
+		return 0;
+
+	__remove_hrtimer(timer, timer->base, timer->state, 0);
+	list_add_tail(&timer->cb_entry, &timer->base->expired);
+	return 1;
+}
+
+#else
+
+static inline void hrtimer_rt_run_pending(void)
+{
+	hrtimer_peek_ahead_timers();
+}
+
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
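
The hunks above split expiry handling in two: timers marked irqsafe still run from the hrtimer interrupt, while everything else is parked on the per-base "expired" list by hrtimer_rt_defer() and run later from HRTIMER_SOFTIRQ with interrupts enabled. Here is a standalone sketch of that two-phase dispatch with deliberately simplified types and no locking; a real implementation holds the base lock around every list operation, as hrtimer_rt_run_pending() does.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct simple_timer {
	struct simple_timer *next;	/* singly linked "expired" list */
	bool irqsafe;
	void (*fn)(struct simple_timer *);
};

static struct simple_timer *expired_head;

/* Phase 1, (emulated) hard-irq context: run or defer. */
static bool expire_timer(struct simple_timer *t)
{
	if (t->irqsafe) {
		t->fn(t);		/* safe to run with irqs off */
		return false;
	}
	t->next = expired_head;		/* park for thread context */
	expired_head = t;
	return true;
}

/* Phase 2, softirq/thread context with interrupts enabled. */
static void run_expired(void)
{
	while (expired_head) {
		struct simple_timer *t = expired_head;

		expired_head = t->next;
		t->fn(t);
	}
}

static void ping(struct simple_timer *t)
{
	(void)t;
	puts("timer fired");
}

int main(void)
{
	struct simple_timer hard = { NULL, true, ping };
	struct simple_timer soft = { NULL, false, ping };

	expire_timer(&hard);	/* fires immediately */
	expire_timer(&soft);	/* deferred */
	run_expired();		/* fires now */
	return 0;
}
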
 #ifdef CONFIG_HIGH_RES_TIMERS
 
 /*
@@ -1246,7 +1480,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
-	int i, retries = 0;
+	int i, retries = 0, raise = 0;
 
	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
@@ -1281,6 +1515,15 @@ retry:
 
			timer = container_of(node, struct hrtimer, node);
 
+			trace_hrtimer_interrupt(raw_smp_processor_id(),
+			    ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
+				timer->praecox : hrtimer_get_expires(timer),
+				basenow)),
+			    current,
+			    timer->function == hrtimer_wakeup ?
+			    container_of(timer, struct hrtimer_sleeper,
+				timer)->task : NULL);
+
			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
@@ -1296,7 +1539,10 @@ retry:
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
				break;
 
-			__run_hrtimer(timer, &basenow);
+			if (!hrtimer_rt_defer(timer))
+				__run_hrtimer(timer, &basenow);
+			else
+				raise = 1;
		}
	}
	/* Reevaluate the clock bases for the next expiry */
@@ -1313,7 +1559,7 @@ retry:
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
-		return;
+		goto out;
	}
 
	/*
@@ -1357,6 +1603,9 @@ retry:
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
+out:
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
@@ -1392,18 +1641,18 @@ void hrtimer_peek_ahead_timers(void)
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
 }
-
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-	hrtimer_peek_ahead_timers();
-}
-
 #else /* CONFIG_HIGH_RES_TIMERS */
 
 static inline void __hrtimer_peek_ahead_timers(void) { }
 
 #endif	/* !CONFIG_HIGH_RES_TIMERS */
 
+
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+	hrtimer_rt_run_pending();
+}
+
 /*
  * Called from timer softirq every jiffy, expire hrtimers:
  *
@@ -1436,7 +1685,7 @@ void hrtimer_run_queues(void)
	struct timerqueue_node *node;
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base;
-	int index, gettime = 1;
+	int index, gettime = 1, raise = 0;
 
	if (hrtimer_hres_active())
		return;
@@ -1461,10 +1710,16 @@ void hrtimer_run_queues(void)
					hrtimer_get_expires_tv64(timer))
				break;
 
-			__run_hrtimer(timer, &base->softirq_time);
+			if (!hrtimer_rt_defer(timer))
+				__run_hrtimer(timer, &base->softirq_time);
+			else
+				raise = 1;
		}
		raw_spin_unlock(&cpu_base->lock);
	}
+
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
@@ -1486,16 +1741,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
 void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
	sl->timer.function = hrtimer_wakeup;
+	sl->timer.irqsafe = 1;
	sl->task = task;
 }
 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
+				unsigned long state)
 {
	hrtimer_init_sleeper(t, current);
 
	do {
-		set_current_state(TASK_INTERRUPTIBLE);
+		set_current_state(state);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;
@@ -1539,7 +1796,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
-	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+	/* cpu_chill() does not care about restart state. */
+	if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
		goto out;
 
	rmtp = restart->nanosleep.rmtp;
@@ -1556,8 +1814,10 @@ out:
	return ret;
 }
 
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-		       const enum hrtimer_mode mode, const clockid_t clockid)
+static long
+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+		    const enum hrtimer_mode mode, const clockid_t clockid,
+		    unsigned long state)
 {
	struct restart_block *restart;
	struct hrtimer_sleeper t;
@@ -1570,7 +1830,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
-	if (do_nanosleep(&t, mode))
+	if (do_nanosleep(&t, mode, state))
		goto out;
 
	/* Absolute timers do not update the rmtp value and restart: */
@@ -1597,6 +1857,12 @@ out:
	return ret;
 }
 
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+		       const enum hrtimer_mode mode, const clockid_t clockid)
+{
+	return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
+}
+
 SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
 {
@@ -1611,6 +1877,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Sleep for 1 ms in hope whoever holds what we want will let it go.
+ */
+void cpu_chill(void)
+{
+	struct timespec tu = {
+		.tv_nsec = NSEC_PER_MSEC,
+	};
+	unsigned int freeze_flag = current->flags & PF_NOFREEZE;
+
+	current->flags |= PF_NOFREEZE;
+	__hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
+			    TASK_UNINTERRUPTIBLE);
+	if (!freeze_flag)
+		current->flags &= ~PF_NOFREEZE;
+}
+EXPORT_SYMBOL(cpu_chill);
+#endif
+
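
cpu_chill() exists so that RT code which used to spin in a retry loop with cpu_relax() sleeps for a millisecond instead: spinning is futile when the lock holder has been preempted by the spinning task itself. The fragment below is a hypothetical illustration of the intended substitution; try_get_resource() is a made-up stand-in for any trylock-style operation, not a kernel API.

/* Hypothetical kernel-style fragment, not from the patch. */
static bool try_get_resource(void);	/* stand-in for a trylock */

static void acquire_resource(void)
{
	while (!try_get_resource()) {
#ifdef CONFIG_PREEMPT_RT_FULL
		cpu_chill();	/* sleep ~1 ms so the holder can run */
#else
		cpu_relax();	/* holder runs concurrently; spin */
#endif
	}
}
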
 /*
  * Functions related to boot-time initialization:
  */
@@ -1622,10 +1908,14 @@ static void init_hrtimers_cpu(int cpu)
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
+		INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
	}
 
	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+	init_waitqueue_head(&cpu_base->wait);
+#endif
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1731,9 +2021,7 @@ void __init hrtimers_init(void)
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 8d262b467573..d0513909d663 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -213,6 +213,7 @@ again:
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
+			hrtimer_wait_for_timer(&tsk->signal->real_timer);
			goto again;
		}
		expires = timeval_to_ktime(value->it_value);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 347fecf86a3f..2ede47408a3e 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
	.max_cycles	= 10,
 };
 
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+__cacheline_aligned_in_smp seqcount_t jiffies_seq;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
@@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
	u64 ret;
 
	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
		ret = jiffies_64;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
	return ret;
 }
 EXPORT_SYMBOL(get_jiffies_64);
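
The jiffies.c change splits the old seqlock into a raw spinlock, which serializes writers and remains a true spinning lock on RT, plus a bare seqcount for lockless readers such as get_jiffies_64() above. Below is a self-contained C11 rendering of that read/retry protocol, following the standard seqlock recipe; the kernel primitives differ in detail, and the datum is made atomic here only to keep the C11 memory model happy.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;			/* odd while a write is in flight */
static _Atomic uint64_t val;		/* the protected 64-bit datum */

/* Writer side; callers are already serialized (the raw spinlock above). */
static void write_val(uint64_t v)
{
	unsigned s = atomic_load_explicit(&seq, memory_order_relaxed);

	atomic_store_explicit(&seq, s + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* count before data */
	atomic_store_explicit(&val, v, memory_order_relaxed);
	atomic_store_explicit(&seq, s + 2, memory_order_release);
}

/* Lockless reader; retries while a writer ran or is still running. */
static uint64_t read_val(void)
{
	unsigned s1, s2;
	uint64_t v;

	do {
		s1 = atomic_load_explicit(&seq, memory_order_acquire);
		v = atomic_load_explicit(&val, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		s2 = atomic_load_explicit(&seq, memory_order_relaxed);
	} while ((s1 & 1) || s1 != s2);
	return v;
}

int main(void)
{
	write_val(42);
	printf("%llu\n", (unsigned long long)read_val());
	return 0;
}
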
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7a681003001c..bd9c53985d32 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,6 +10,7 @@
 #include <linux/workqueue.h>
 #include <linux/hrtimer.h>
 #include <linux/jiffies.h>
+#include <linux/kthread.h>
 #include <linux/math64.h>
 #include <linux/timex.h>
 #include <linux/time.h>
@@ -529,10 +530,52 @@ static void sync_cmos_clock(struct work_struct *work)
			   &sync_cmos_work, timespec_to_jiffies(&next));
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * RT can not call schedule_delayed_work from real interrupt context.
+ * Need to make a thread to do the real work.
+ */
+static struct task_struct *cmos_delay_thread;
+static bool do_cmos_delay;
+
+static int run_cmos_delay(void *ignore)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (do_cmos_delay) {
+			do_cmos_delay = false;
+			queue_delayed_work(system_power_efficient_wq,
+					   &sync_cmos_work, 0);
+		}
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+void ntp_notify_cmos_timer(void)
+{
+	do_cmos_delay = true;
+	/* Make visible before waking up process */
+	smp_wmb();
+	wake_up_process(cmos_delay_thread);
+}
+
+static __init int create_cmos_delay_thread(void)
+{
+	cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
+	BUG_ON(!cmos_delay_thread);
+	return 0;
+}
+early_initcall(create_cmos_delay_thread);
+
+#else
+
 void ntp_notify_cmos_timer(void)
 {
	queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
 }
+#endif /* CONFIG_PREEMPT_RT_FULL */
 
 #else
 void ntp_notify_cmos_timer(void) { }
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 57d1acb91c56..5b24aefef595 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -3,6 +3,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/posix-timers.h>
 #include <linux/errno.h>
 #include <linux/math64.h>
@@ -626,7 +627,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE_NONRT(!irqs_disabled());
 
	ret = 0;
	old_incr = timer->it.cpu.incr;
@@ -1048,7 +1049,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
	/*
	 * Now re-arm for the new expiry time.
	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE_NONRT(!irqs_disabled());
	arm_timer(timer);
	unlock_task_sighand(p, &flags);
 
@@ -1114,10 +1115,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;
+		unsigned long flags;
 
-		raw_spin_lock(&sig->cputimer.lock);
+		raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
		group_sample = sig->cputimer.cputime;
-		raw_spin_unlock(&sig->cputimer.lock);
+		raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
 
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
@@ -1131,13 +1133,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
  * already updated our counts.  We need to check if any timers fire now.
  * Interrupts are disabled.
  */
-void run_posix_cpu_timers(struct task_struct *tsk)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
 {
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE_NONRT(!irqs_disabled());
 
	/*
	 * The fast path checks that there are no expired thread or thread
@@ -1195,6 +1197,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
	}
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+	int cpu = (long)data;
+
+	BUG_ON(per_cpu(posix_timer_task,cpu) != current);
+
+	while (!kthread_should_stop()) {
+		struct task_struct *tsk = NULL;
+		struct task_struct *next = NULL;
+
+		if (cpu_is_offline(cpu))
+			goto wait_to_die;
+
+		/* grab task list */
+		raw_local_irq_disable();
+		tsk = per_cpu(posix_timer_tasklist, cpu);
+		per_cpu(posix_timer_tasklist, cpu) = NULL;
+		raw_local_irq_enable();
+
+		/* its possible the list is empty, just return */
+		if (!tsk) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			__set_current_state(TASK_RUNNING);
+			continue;
+		}
+
+		/* Process task list */
+		while (1) {
+			/* save next */
+			next = tsk->posix_timer_list;
+
+			/* run the task timers, clear its ptr and
+			 * unreference it
+			 */
+			__run_posix_cpu_timers(tsk);
+			tsk->posix_timer_list = NULL;
+			put_task_struct(tsk);
+
+			/* check if this is the last on the list */
+			if (next == tsk)
+				break;
+			tsk = next;
+		}
+	}
+	return 0;
+
+wait_to_die:
+	/* Wait for kthread_stop */
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+	/* tsk == current, ensure it is safe to use ->signal/sighand */
+	if (unlikely(tsk->exit_state))
+		return 0;
+
+	if (!task_cputime_zero(&tsk->cputime_expires))
+		return 1;
+
+	if (!task_cputime_zero(&tsk->signal->cputime_expires))
+		return 1;
+
+	return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+	unsigned long cpu = smp_processor_id();
+	struct task_struct *tasklist;
+
+	BUG_ON(!irqs_disabled());
+	if(!per_cpu(posix_timer_task, cpu))
+		return;
+	/* get per-cpu references */
+	tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+	/* check to see if we're already queued */
+	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+		get_task_struct(tsk);
+		if (tasklist) {
+			tsk->posix_timer_list = tasklist;
+		} else {
+			/*
+			 * The list is terminated by a self-pointing
+			 * task_struct
+			 */
+			tsk->posix_timer_list = tsk;
+		}
+		per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+		wake_up_process(per_cpu(posix_timer_task, cpu));
+	}
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
+ */
+static int posix_cpu_thread_call(struct notifier_block *nfb,
+				 unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+	struct task_struct *p;
+	struct sched_param param;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		p = kthread_create(posix_cpu_timers_thread, hcpu,
+				   "posixcputmr/%d",cpu);
+		if (IS_ERR(p))
+			return NOTIFY_BAD;
+		p->flags |= PF_NOFREEZE;
+		kthread_bind(p, cpu);
+		/* Must be high prio to avoid getting starved */
+		param.sched_priority = MAX_RT_PRIO-1;
+		sched_setscheduler(p, SCHED_FIFO, &param);
+		per_cpu(posix_timer_task,cpu) = p;
+		break;
+	case CPU_ONLINE:
+		/* Strictly unneccessary, as first user will wake it. */
+		wake_up_process(per_cpu(posix_timer_task,cpu));
+		break;
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_UP_CANCELED:
+		/* Unbind it from offline cpu so it can run. Fall thru. */
+		kthread_bind(per_cpu(posix_timer_task, cpu),
+			     cpumask_any(cpu_online_mask));
+		kthread_stop(per_cpu(posix_timer_task,cpu));
+		per_cpu(posix_timer_task,cpu) = NULL;
+		break;
+	case CPU_DEAD:
+		kthread_stop(per_cpu(posix_timer_task,cpu));
+		per_cpu(posix_timer_task,cpu) = NULL;
+		break;
+#endif
+	}
+	return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block posix_cpu_thread_notifier = {
+	.notifier_call = posix_cpu_thread_call,
+	.priority = 10
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+	void *hcpu = (void *)(long)smp_processor_id();
+	/* Start one for boot CPU. */
+	unsigned long cpu;
+
+	/* init the per-cpu posix_timer_tasklets */
+	for_each_possible_cpu(cpu)
+		per_cpu(posix_timer_tasklist, cpu) = NULL;
+
+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
+	register_cpu_notifier(&posix_cpu_thread_notifier);
+	return 0;
+}
+early_initcall(posix_cpu_thread_init);
+#else /* CONFIG_PREEMPT_RT_BASE */
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+	__run_posix_cpu_timers(tsk);
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
 /*
  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
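
run_posix_cpu_timers() above chains tasks through ->posix_timer_list and marks the tail with a pointer to the task itself rather than NULL, so NULL can keep meaning "not queued" without any extra flag. A minimal standalone sketch of that self-terminated list idiom, with an invented node type:

#include <stddef.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;	/* NULL: not queued; self: list tail */
};

static struct node *head;

static void push(struct node *n)
{
	if (n->next)			/* already queued */
		return;
	n->next = head ? head : n;	/* self-pointer terminates */
	head = n;
}

static void drain(void)
{
	struct node *n = head;

	head = NULL;
	while (n) {
		struct node *next = n->next;

		printf("node %d\n", n->id);
		n->next = NULL;		/* back to "not queued" */
		if (next == n)		/* tail reached */
			break;
		n = next;
	}
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };

	push(&a);
	push(&b);
	drain();		/* prints 2 then 1 (LIFO) */
	return 0;
}
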
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 31ea01f42e1f..0f5d7eae61f0 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -499,6 +499,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 static struct pid *good_sigevent(sigevent_t * event)
 {
	struct task_struct *rtn = current->group_leader;
+	int sig = event->sigev_signo;
 
	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
@@ -507,7 +508,8 @@ static struct pid *good_sigevent(sigevent_t * event)
		return NULL;
 
	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
+	    (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
+	     sig_kernel_coredump(sig)))
		return NULL;
 
	return task_pid(rtn);
@@ -819,6 +821,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
	return overrun;
 }
 
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (kc->timer_set == common_timer_set)
+		hrtimer_wait_for_timer(&timr->it.real.timer);
+	else
+		/* FIXME: Whacky hack for posix-cpu-timers */
+		schedule_timeout(1);
+#endif
+}
+
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
 static int
@@ -896,6 +912,7 @@ retry:
	if (!timr)
		return -EINVAL;
 
+	rcu_read_lock();
	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
@@ -904,9 +921,12 @@ retry:
	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
+		timer_wait_for_callback(kc, timr);
		rtn = NULL;	// We already got the old time...
+		rcu_read_unlock();
		goto retry;
	}
+	rcu_read_unlock();
 
	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -944,10 +964,15 @@ retry_delete:
	if (!timer)
		return -EINVAL;
 
+	rcu_read_lock();
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
		goto retry_delete;
	}
+	rcu_read_unlock();
 
	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
@@ -973,8 +998,18 @@ static void itimer_delete(struct k_itimer *timer)
 retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);
 
+	/* On RT we can race with a deletion */
+	if (!timer->it_signal) {
+		unlock_timer(timer, flags);
+		return;
+	}
+
	if (timer_delete_hook(timer) == TIMER_RETRY) {
+		rcu_read_lock();
		unlock_timer(timer, flags);
+		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+					timer);
+		rcu_read_unlock();
		goto retry_delete;
	}
	list_del(&timer->list);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 6aac4beedbbe..943c03395e46 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -109,5 +109,6 @@ void tick_setup_hrtimer_broadcast(void)
 {
	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	bctimer.function = bc_handler;
+	bctimer.irqsafe = true;
	clockevents_register_device(&ce_broadcast_hrtimer);
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 3ae6afa1eb98..14a10917c8a3 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -78,13 +78,15 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&jiffies_lock);
+		raw_spin_lock(&jiffies_lock);
+		write_seqcount_begin(&jiffies_seq);
 
		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);
 
		do_timer(1);
-		write_sequnlock(&jiffies_lock);
+		write_seqcount_end(&jiffies_seq);
+		raw_spin_unlock(&jiffies_lock);
		update_wall_time();
	}
 
@@ -146,9 +148,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
		ktime_t next;
 
		do {
-			seq = read_seqbegin(&jiffies_lock);
+			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
-		} while (read_seqretry(&jiffies_lock, seq));
+		} while (read_seqcount_retry(&jiffies_seq, seq));
 
		clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 914259128145..b3841ba00c69 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
		return;
 
	/* Reevalute with jiffies_lock held */
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
 
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {
@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	} else {
-		write_sequnlock(&jiffies_lock);
+		write_seqcount_end(&jiffies_seq);
+		raw_spin_unlock(&jiffies_lock);
		return;
	}
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
 }
 
@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
 {
	ktime_t period;
 
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
	return period;
 }
 
@@ -176,6 +181,11 @@ static bool can_stop_full_tick(void)
		return false;
	}
 
+	if (!arch_irq_work_has_interrupt()) {
+		trace_tick_stop(0, "missing irq work interrupt\n");
+		return false;
+	}
+
	/* sched_clock_tick() needs us? */
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	/*
@@ -222,6 +232,7 @@ static void nohz_full_kick_work_func(struct irq_work *work)
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_work_func,
+	.flags = IRQ_WORK_HARD_IRQ,
 };
 
 /*
@@ -578,10 +589,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
	/* Read jiffies and the time when jiffies were updated last */
	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
 
	if (rcu_needs_cpu(&rcu_delta_jiffies) ||
	    arch_needs_cpu() || irq_work_needs_cpu()) {
@@ -759,14 +770,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
		return false;
 
	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-		static int ratelimit;
-
-		if (ratelimit < 10 &&
-		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-			pr_warn("NOHZ: local_softirq_pending %02x\n",
-				(unsigned int) local_softirq_pending());
-			ratelimit++;
-		}
+		softirq_check_pending_idle();
		return false;
	}
 
@@ -1154,6 +1158,7 @@ void tick_setup_sched_timer(void)
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	ts->sched_timer.irqsafe = 1;
	ts->sched_timer.function = tick_sched_timer;
 
	/* Get the next period (per cpu) */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d296b904685b..a991fba43282 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2087,8 +2087,10 @@ EXPORT_SYMBOL(hardpps);
  */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
	do_timer(ticks);
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
 }
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index ead8794b9a4e..d7a9120a9f52 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -22,7 +22,8 @@ extern void timekeeping_resume(void);
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
 
-extern seqlock_t jiffies_lock;
+extern raw_spinlock_t jiffies_lock;
+extern seqcount_t jiffies_seq;
 
 #define CS_NAME_LEN	32
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2ece3aa5069c..c68ba873da3c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -78,6 +78,9 @@ struct tvec_root {
 struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	wait_queue_head_t wait_for_running_timer;
+#endif
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
@@ -768,6 +771,36 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
	}
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+						  struct tvec_base *old,
+						  struct tvec_base *new)
+{
+	/* See the comment in lock_timer_base() */
+	timer_set_base(timer, NULL);
+	spin_unlock(&old->lock);
+	spin_lock(&new->lock);
+	timer_set_base(timer, new);
+	return new;
+}
+#else
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+						  struct tvec_base *old,
+						  struct tvec_base *new)
+{
+	/*
+	 * We cannot do the above because we might be preempted and
+	 * then the preempter would see NULL and loop forever.
+	 */
+	if (spin_trylock(&new->lock)) {
+		timer_set_base(timer, new);
+		spin_unlock(&old->lock);
+		return new;
+	}
+	return old;
+}
+#endif
+
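
The #else variant above avoids the non-RT trick of publishing a NULL base while migrating a timer: on RT, a task spinning in lock_timer_base() may have preempted the writer and would then see NULL forever, since the preempted task cannot run to repair it. Instead the base is switched only if the destination lock can be taken with a trylock; otherwise the timer simply stays put. A pthread sketch of the same idea, with types and names invented for illustration:

#include <pthread.h>

struct base {
	pthread_mutex_t lock;
};

struct item {
	struct base *base;
};

/* Called with old->lock held; returns whichever base is now locked. */
static struct base *switch_base(struct item *it,
				struct base *old, struct base *new)
{
	if (pthread_mutex_trylock(&new->lock) == 0) {
		it->base = new;		/* never visible as NULL */
		pthread_mutex_unlock(&old->lock);
		return new;
	}
	return old;	/* contended: keep the old base, retry later */
}

int main(void)
{
	struct base a = { PTHREAD_MUTEX_INITIALIZER };
	struct base b = { PTHREAD_MUTEX_INITIALIZER };
	struct item it = { &a };
	struct base *locked;

	pthread_mutex_lock(&a.lock);
	locked = switch_base(&it, &a, &b);	/* succeeds: b is free */
	pthread_mutex_unlock(&locked->lock);
	return 0;
}
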
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
@@ -798,14 +831,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
		 * handler yet has not finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
-		if (likely(base->running_timer != timer)) {
-			/* See the comment in lock_timer_base() */
-			timer_set_base(timer, NULL);
-			spin_unlock(&base->lock);
-			base = new_base;
-			spin_lock(&base->lock);
-			timer_set_base(timer, base);
-		}
+		if (likely(base->running_timer != timer))
+			base = switch_timer_base(timer, base, new_base);
	}
 
	timer->expires = expires;
@@ -979,6 +1006,29 @@ void add_timer_on(struct timer_list *timer, int cpu)
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+	struct tvec_base *base = timer->base;
+
+	if (base->running_timer == timer)
+		wait_event(base->wait_for_running_timer,
+			   base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b)	wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+	cpu_relax();
+}
+
+# define wakeup_timer_waiters(b)	do { } while (0)
+#endif
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -1036,7 +1086,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
 
 /**
@@ -1098,7 +1148,7 @@ int del_timer_sync(struct timer_list *timer)
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
-		cpu_relax();
+		wait_for_running_timer(timer);
	}
 }
 EXPORT_SYMBOL(del_timer_sync);
@@ -1219,15 +1269,17 @@ static inline void __run_timers(struct tvec_base *base)
			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
+				base->running_timer = NULL;
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
+				base->running_timer = NULL;
				spin_lock_irq(&base->lock);
			}
		}
	}
-	base->running_timer = NULL;
+	wakeup_timer_waiters(base);
	spin_unlock_irq(&base->lock);
 }
 
@@ -1367,6 +1419,14 @@ unsigned long get_next_timer_interrupt(unsigned long now)
	if (cpu_is_offline(smp_processor_id()))
		return expires;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * On PREEMPT_RT we cannot sleep here. As a result we can't take
+	 * the base lock to check when the next timer is pending and so
+	 * we assume the next jiffy.
+	 */
+	return now + 1;
+#endif
	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
@@ -1392,13 +1452,13 @@ void update_process_times(int user_tick)
 
	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
+	scheduler_tick();
	run_local_timers();
	rcu_check_callbacks(user_tick);
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK)
	if (in_irq())
		irq_work_tick();
 #endif
-	scheduler_tick();
	run_posix_cpu_timers(p);
 }
 
@@ -1411,6 +1471,8 @@ static void run_timer_softirq(struct softirq_action *h)
 
	hrtimer_run_pending();
 
+	irq_work_tick_soft();
+
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
 }
@@ -1566,7 +1628,7 @@ static void migrate_timers(int cpu)
 
	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
-	new_base = get_cpu_var(tvec_bases);
+	new_base = get_local_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
@@ -1590,7 +1652,7 @@ static void migrate_timers(int cpu)
 
	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
-	put_cpu_var(tvec_bases);
+	put_local_var(tvec_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1625,6 +1687,9 @@ static void __init init_timer_cpu(struct tvec_base *base, int cpu)
	base->cpu = cpu;
	per_cpu(tvec_bases, cpu) = base;
	spin_lock_init(&base->lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	init_waitqueue_head(&base->wait_for_running_timer);
+#endif
 
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);