Index: linux-stable/arch/arm/Kconfig
===================================================================
--- linux-stable.orig/arch/arm/Kconfig
+++ linux-stable/arch/arm/Kconfig
@@ -50,6 +50,7 @@ config ARM
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
+	select HAVE_PREEMPT_LAZY
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
Index: linux-stable/arch/powerpc/Kconfig
===================================================================
--- linux-stable.orig/arch/powerpc/Kconfig
+++ linux-stable/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_PREEMPT_LAZY
 
 config EARLY_PRINTK
 	bool
Index: linux-stable/arch/x86/include/asm/thread_info.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/thread_info.h
+++ linux-stable/arch/x86/include/asm/thread_info.h
@@ -159,6 +159,8 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
 
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
 #define PREEMPT_ACTIVE		0x10000000
 
 #ifdef CONFIG_X86_32
Index: linux-stable/arch/x86/kernel/entry_32.S
===================================================================
--- linux-stable.orig/arch/x86/kernel/entry_32.S
+++ linux-stable/arch/x86/kernel/entry_32.S
@@ -349,20 +349,22 @@ ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_all
-need_resched:
 	movl TI_flags(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jnz 1f
 	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
 	jnz restore_all
-	testb $_TIF_NEED_RESCHED_LAZY, %cl
+	testl $_TIF_NEED_RESCHED_LAZY, %ecx
 	jz restore_all
 1:	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
-	jmp need_resched
+	movl TI_flags(%ebp), %ecx	# need_resched set ?
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
+	jnz 1b
+	jmp restore_all
 END(resume_kernel)
 #endif
 	CFI_ENDPROC
@@ -595,7 +597,7 @@ ENDPROC(system_call)
 	ALIGN
 	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jz work_notifysig
 work_resched:
 	call schedule
@@ -608,7 +610,7 @@ work_resched:
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
 	jz restore_all
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jnz work_resched
 
 work_notifysig:				# deal with pending signals and
Index: linux-stable/arch/x86/kernel/entry_64.S
===================================================================
--- linux-stable.orig/arch/x86/kernel/entry_64.S
+++ linux-stable/arch/x86/kernel/entry_64.S
@@ -560,8 +560,8 @@ sysret_check:
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc sysret_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz sysret_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -673,8 +673,8 @@ GLOBAL(int_with_check)
 	/* First do a reschedule test. */
 	/* edx:	work, edi: workmask */
 int_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc int_very_careful
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -969,8 +969,8 @@ bad_iret:
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
-	bt $TIF_NEED_RESCHED,%edx
-	jnc retint_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1443,7 +1443,7 @@ paranoid_userspace:
 	movq %rsp,%rdi			/* &pt_regs */
 	call sync_regs
 	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
+	testl $_TIF_NEED_RESCHED_MASK,%ebx
 	jnz paranoid_schedule
 	movl %ebx,%edx			/* arg3: thread flags */
 	TRACE_IRQS_ON
Index: linux-stable/drivers/of/base.c
===================================================================
--- linux-stable.orig/drivers/of/base.c
+++ linux-stable/drivers/of/base.c
@@ -417,7 +417,7 @@ struct device_node *of_get_next_availabl
 {
 	struct device_node *next;
 
-	read_lock(&devtree_lock);
+	raw_spin_lock(&devtree_lock);
 	next = prev ? prev->sibling : node->child;
 	for (; next; next = next->sibling) {
 		if (!of_device_is_available(next))
@@ -426,7 +426,7 @@ struct device_node *of_get_next_availabl
 			break;
 	}
 	of_node_put(prev);
-	read_unlock(&devtree_lock);
+	raw_spin_unlock(&devtree_lock);
 	return next;
 }
 EXPORT_SYMBOL(of_get_next_available_child);
Index: linux-stable/include/linux/preempt.h
===================================================================
--- linux-stable.orig/include/linux/preempt.h
+++ linux-stable/include/linux/preempt.h
@@ -23,7 +23,7 @@
 
 #define preempt_count()	(current_thread_info()->preempt_count)
 
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 #define add_preempt_lazy_count(val)	do { preempt_lazy_count() += (val); } while (0)
 #define sub_preempt_lazy_count(val)	do { preempt_lazy_count() -= (val); } while (0)
 #define inc_preempt_lazy_count()	add_preempt_lazy_count(1)
@@ -41,7 +41,7 @@
 
 asmlinkage void preempt_schedule(void);
 
-# ifdef CONFIG_HAVE_PREEMPT_LAZY
+# ifdef CONFIG_PREEMPT_LAZY
 #define preempt_check_resched() \
 do { \
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -2660,7 +2660,7 @@ static inline int test_tsk_need_resched(
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
Index: linux-stable/kernel/Kconfig.preempt
===================================================================
--- linux-stable.orig/kernel/Kconfig.preempt
+++ linux-stable/kernel/Kconfig.preempt
@@ -9,6 +9,9 @@ config PREEMPT_RT_BASE
 config HAVE_PREEMPT_LAZY
 	bool
 
+config PREEMPT_LAZY
+	def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
 choice
 	prompt "Preemption Model"
 	default PREEMPT_NONE
Index: linux-stable/kernel/irq/manage.c
===================================================================
--- linux-stable.orig/kernel/irq/manage.c
+++ linux-stable/kernel/irq/manage.c
@@ -855,7 +855,7 @@ static int irq_thread(void *data)
 #ifdef CONFIG_PREEMPT_RT_FULL
 		migrate_disable();
 		add_interrupt_randomness(action->irq, 0,
-				 desc->random_ip ^ (u64) action);
+				 desc->random_ip ^ (unsigned long) action);
 		migrate_enable();
 #endif
 		wake_threads_waitq(desc);
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -534,7 +534,7 @@ void resched_task(struct task_struct *p)
 		smp_send_reschedule(cpu);
 }
 
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 void resched_task_lazy(struct task_struct *p)
 {
 	int cpu;
@@ -681,7 +681,7 @@ void resched_task(struct task_struct *p)
 	assert_raw_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
 }
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 void resched_task_lazy(struct task_struct *p)
 {
 	if (!sched_feat(PREEMPT_LAZY)) {
@@ -1880,6 +1880,9 @@ void sched_fork(struct task_struct *p)
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
+	task_thread_info(p)->preempt_lazy_count = 0;
+#endif
 #ifdef CONFIG_SMP
 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
 #endif
@@ -3769,7 +3772,7 @@ asmlinkage void __sched notrace preempt_
 	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 	/*
 	 * Check for lazy preemption
 	 */
@@ -5384,8 +5387,9 @@ void __cpuinit init_idle(struct task_str
 
 	/* Set the preempt count _outside_ the spinlocks! */
 	task_thread_info(idle)->preempt_count = 0;
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
 	task_thread_info(idle)->preempt_lazy_count = 0;
-
+#endif
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
Index: linux-stable/kernel/sched/features.h
===================================================================
--- linux-stable.orig/kernel/sched/features.h
+++ linux-stable/kernel/sched/features.h
@@ -68,7 +68,9 @@ SCHED_FEAT(NONTASK_POWER, true)
 SCHED_FEAT(TTWU_QUEUE, true)
 #else
 SCHED_FEAT(TTWU_QUEUE, false)
+# ifdef CONFIG_PREEMPT_LAZY
 SCHED_FEAT(PREEMPT_LAZY, true)
+# endif
 #endif
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
Index: linux-stable/kernel/sched/sched.h
===================================================================
--- linux-stable.orig/kernel/sched/sched.h
+++ linux-stable/kernel/sched/sched.h
@@ -876,7 +876,7 @@ extern void init_sched_fair_class(void);
 extern void resched_task(struct task_struct *p);
 extern void resched_cpu(int cpu);
 
-#ifdef CONFIG_HAVE_PREEMPT_LAZY
+#ifdef CONFIG_PREEMPT_LAZY
 extern void resched_task_lazy(struct task_struct *tsk);
 #else
 static inline void resched_task_lazy(struct task_struct *tsk)
Index: linux-stable/localversion-rt
===================================================================
--- linux-stable.orig/localversion-rt
+++ linux-stable/localversion-rt
@@ -1 +1 @@
--rt11
+-rt12
Index: linux-stable/arch/arm/include/asm/thread_info.h
===================================================================
--- linux-stable.orig/arch/arm/include/asm/thread_info.h
+++ linux-stable/arch/arm/include/asm/thread_info.h
@@ -50,6 +50,7 @@ struct cpu_context_save {
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
 	mm_segment_t		addr_limit;	/* address limit */
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
@@ -146,6 +147,7 @@ extern int vfp_restore_user_hwstate(stru
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_NEED_RESCHED_LAZY	3
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_POLLING_NRFLAG	16
@@ -158,6 +160,7 @@ extern int vfp_restore_user_hwstate(stru
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
Index: linux-stable/arch/arm/kernel/asm-offsets.c
===================================================================
--- linux-stable.orig/arch/arm/kernel/asm-offsets.c
+++ linux-stable/arch/arm/kernel/asm-offsets.c
@@ -50,6 +50,7 @@ int main(void)
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
Index: linux-stable/arch/arm/kernel/entry-armv.S
===================================================================
--- linux-stable.orig/arch/arm/kernel/entry-armv.S
+++ linux-stable/arch/arm/kernel/entry-armv.S
@@ -221,6 +221,12 @@ __irq_svc:
 	movne	r0, #0				@ force flags to 0
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
+	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	teq	r8, #0				@ if preempt lazy count != 0
+	movne	r0, #0				@ force flags to 0
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
+	blne	svc_preempt
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -240,6 +246,8 @@ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
+	bne	1b
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	moveq	pc, r8				@ go again
 	b	1b
 #endif
Index: linux-stable/arch/arm/kernel/signal.c
===================================================================
--- linux-stable.orig/arch/arm/kernel/signal.c
+++ linux-stable/arch/arm/kernel/signal.c
@@ -639,7 +639,8 @@ asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
 	do {
-		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+		if (likely(thread_flags & (_TIF_NEED_RESCHED |
+					   _TIF_NEED_RESCHED_LAZY))) {
 			schedule();
 		} else {
 			if (unlikely(!user_mode(regs)))
Index: linux-stable/arch/powerpc/include/asm/thread_info.h
===================================================================
--- linux-stable.orig/arch/powerpc/include/asm/thread_info.h
+++ linux-stable/arch/powerpc/include/asm/thread_info.h
@@ -43,6 +43,8 @@ struct thread_info {
 	int		cpu;			/* cpu we're on */
 	int		preempt_count;		/* 0 => preemptable,
						   <0 => BUG */
+	int		preempt_lazy_count;	/* 0 => preemptable,
+						   <0 => BUG */
 	struct restart_block restart_block;
 	unsigned long	local_flags;		/* private flags for thread */
@@ -102,12 +104,14 @@ static inline struct thread_info *curren
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
+#define TIF_NEED_RESCHED_LAZY	14	/* lazy rescheduling necessary */
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<