diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 3f7d3b5b0717..afb62c7cc494 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1457,7 +1457,11 @@ static int hrtimer_rt_defer(struct hrtimer *timer)
 
 #else
 
-static inline void hrtimer_rt_run_pending(void) { }
+static inline void hrtimer_rt_run_pending(void)
+{
+	hrtimer_peek_ahead_timers();
+}
+
 static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
 
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1199054adb2e..5ba55a8b26e0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1325,12 +1325,6 @@ out:
 		}
 	}
 
-	/*
-	 * Clear PF_THREAD_BOUND, otherwise we wreckage
-	 * migrate_disable/enable. See optimization for
-	 * PF_THREAD_BOUND tasks there.
-	 */
-	p->flags &= ~PF_THREAD_BOUND;
 	return dest_cpu;
 }
 
@@ -3580,9 +3574,8 @@ need_resched:
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
-	if (!tsk->state || tsk_is_pi_blocked(tsk))
+	if (!tsk->state)
 		return;
-
 	/*
 	 * If a worker went to sleep, notify and ask workqueue whether
 	 * it wants to wake up a task to maintain concurrency.
@@ -3592,6 +3585,10 @@ static inline void sched_submit_work(struct task_struct *tsk)
 
 	if (tsk->flags & PF_WQ_WORKER && !tsk->saved_state)
 		wq_worker_sleeping(tsk);
+
+	if (tsk_is_pi_blocked(tsk))
+		return;
+
 	/*
 	 * If we are going to sleep and we have plugged IO queued,
 	 * make sure to submit it to avoid deadlocks.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4d21bfdc1637..653d7fccb762 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -319,6 +319,31 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 	     (cpu) < WORK_CPU_NONE;					\
 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void rt_lock_idle_list(struct global_cwq *gcwq)
+{
+	preempt_disable();
+}
+static inline void rt_unlock_idle_list(struct global_cwq *gcwq)
+{
+	preempt_enable();
+}
+static inline void sched_lock_idle_list(struct global_cwq *gcwq) { }
+static inline void sched_unlock_idle_list(struct global_cwq *gcwq) { }
+#else
+static inline void rt_lock_idle_list(struct global_cwq *gcwq) { }
+static inline void rt_unlock_idle_list(struct global_cwq *gcwq) { }
+static inline void sched_lock_idle_list(struct global_cwq *gcwq)
+{
+	spin_lock_irq(&gcwq->lock);
+}
+static inline void sched_unlock_idle_list(struct global_cwq *gcwq)
+{
+	spin_unlock_irq(&gcwq->lock);
+}
+#endif
+
+
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
@@ -650,10 +675,16 @@ static struct worker *first_worker(struct global_cwq *gcwq)
  */
 static void wake_up_worker(struct global_cwq *gcwq)
 {
-	struct worker *worker = first_worker(gcwq);
+	struct worker *worker;
+
+	rt_lock_idle_list(gcwq);
+
+	worker = first_worker(gcwq);
 
 	if (likely(worker))
 		wake_up_process(worker->task);
+
+	rt_unlock_idle_list(gcwq);
 }
 
 /**
@@ -696,7 +727,6 @@ void wq_worker_sleeping(struct task_struct *task)
 
 	cpu = smp_processor_id();
 	gcwq = get_gcwq(cpu);
-	spin_lock_irq(&gcwq->lock);
 	/*
 	 * The counterpart of the following dec_and_test, implied mb,
 	 * worklist not empty test sequence is in insert_work().
@@ -704,11 +734,10 @@ void wq_worker_sleeping(struct task_struct *task)
 	 */
 	if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
 	    !list_empty(&gcwq->worklist)) {
-		worker = first_worker(gcwq);
-		if (worker)
-			wake_up_process(worker->task);
+		sched_lock_idle_list(gcwq);
+		wake_up_worker(gcwq);
+		sched_unlock_idle_list(gcwq);
 	}
-	spin_unlock_irq(&gcwq->lock);
 }
 
 /**
diff --git a/localversion-rt b/localversion-rt
index fff72aad59ab..8188d85cffe0 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt120
+-rt121