diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 2899ba0d23d1..abc65de5d793 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -57,6 +57,12 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+static inline bool use_lazy_list(struct irq_work *work)
+{
+	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+		|| (work->flags & IRQ_WORK_LAZY);
+}
+
 #ifdef CONFIG_SMP
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
@@ -78,7 +84,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (!irq_work_claim(work))
 		return false;
 
-	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+	if (use_lazy_list(work))
 		list = &per_cpu(lazy_list, cpu);
 	else
 		list = &per_cpu(raised_list, cpu);
@@ -95,7 +101,7 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
 bool irq_work_queue(struct irq_work *work)
 {
 	struct llist_head *list;
-	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+	int lazy_work;
 
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
@@ -106,7 +112,7 @@ bool irq_work_queue(struct irq_work *work)
 
 	lazy_work = work->flags & IRQ_WORK_LAZY;
 
-	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+	if (use_lazy_list(work))
 		list = this_cpu_ptr(&lazy_list);
 	else
 		list = this_cpu_ptr(&raised_list);
diff --git a/localversion-rt b/localversion-rt
index 9bc39cfc747c..7cbd4fa29217 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt196
+-rt197