diff --git a/arch/Kconfig b/arch/Kconfig
index 73fc7cd51582..5e921bd9a57b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -46,6 +46,7 @@ config KPROBES
 config JUMP_LABEL
 	bool "Optimize trace point call sites"
 	depends on HAVE_ARCH_JUMP_LABEL
+	depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
 	help
 	  If it is detected that the compiler has support for "asm goto",
 	  the kernel will compile trace point locations with just a
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a3ebb09d4283..3c87797e371e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -341,6 +341,18 @@ static inline void __ftrace_enabled_restore(int enabled)
 # endif
 #endif /* ifndef HAVE_ARCH_CALLER_ADDR */
 
+static inline unsigned long get_lock_parent_ip(void)
+{
+	unsigned long addr = CALLER_ADDR0;
+
+	if (!in_lock_functions(addr))
+		return addr;
+	addr = CALLER_ADDR1;
+	if (!in_lock_functions(addr))
+		return addr;
+	return CALLER_ADDR2;
+}
+
 #ifdef CONFIG_IRQSOFF_TRACER
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a15cfd1bac9f..3cb870f1ffc1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -149,8 +149,6 @@ extern unsigned long this_cpu_load(void);
 extern void calc_global_load(unsigned long ticks);
 extern void update_cpu_load_nohz(void);
 
-extern unsigned long get_parent_ip(unsigned long addr);
-
 struct seq_file;
 struct cfs_rq;
 struct task_group;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 764825c2685c..3cd7834c7da6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -455,6 +455,7 @@ static struct rcu_torture_ops rcu_expedited_ops = {
 	.name		= "rcu_expedited"
 };
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Definitions for rcu_bh torture testing.
  */
@@ -528,6 +529,12 @@ static struct rcu_torture_ops rcu_bh_expedited_ops = {
 	.name		= "rcu_bh_expedited"
 };
 
+#else
+static struct rcu_torture_ops rcu_bh_ops = {
+	.ttype		= INVALID_RCU_FLAVOR,
+};
+#endif
+
 /*
  * Definitions for srcu torture testing.
 */
diff --git a/kernel/sched.c b/kernel/sched.c
index a9f6d6c0ab93..abc27a937c1b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4460,16 +4460,6 @@ void scheduler_tick(void)
 #endif
 }
 
-notrace unsigned long get_parent_ip(unsigned long addr)
-{
-	if (in_lock_functions(addr)) {
-		addr = CALLER_ADDR2;
-		if (in_lock_functions(addr))
-			addr = CALLER_ADDR3;
-	}
-	return addr;
-}
-
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 	defined(CONFIG_PREEMPT_TRACER))
 
@@ -4491,7 +4481,7 @@ void __kprobes add_preempt_count(int val)
 				PREEMPT_MASK - 10);
 #endif
 	if (preempt_count() == val) {
-		unsigned long ip = get_parent_ip(CALLER_ADDR1);
+		unsigned long ip = get_lock_parent_ip();
 #ifdef CONFIG_DEBUG_PREEMPT
 		current->preempt_disable_ip = ip;
 #endif
@@ -4517,7 +4507,7 @@ void __kprobes sub_preempt_count(int val)
 #endif
 
 	if (preempt_count() == val)
-		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 	preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
@@ -4611,7 +4601,7 @@ void migrate_disable(void)
 {
 	struct task_struct *p = current;
 
-	if (in_atomic()) {
+	if (in_atomic() || irqs_disabled()) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic++;
 #endif
@@ -4642,7 +4632,7 @@ void migrate_enable(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	if (in_atomic()) {
+	if (in_atomic() || irqs_disabled()) {
 #ifdef CONFIG_SCHED_DEBUG
 		p->migrate_disable_atomic--;
 #endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b7d68392e833..b03c01c77d92 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -207,7 +207,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 	raw_local_irq_restore(flags);
 
 	if (preempt_count() == cnt)
-		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
@@ -561,10 +561,10 @@ static int __thread_do_softirq(int cpu)
 	 */
 	if (local_softirq_pending())
 		__do_softirq_common(cpu >= 0);
-	local_unlock(local_softirq_lock);
 	unpin_current_cpu();
-	preempt_disable();
 	local_irq_enable();
+	local_unlock(local_softirq_lock);
+	preempt_disable();
 	return 0;
 }
 
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
index 6a4c8694c55a..900046843068 100644
--- a/kernel/trace/latency_hist.c
+++ b/kernel/trace/latency_hist.c
@@ -114,7 +114,7 @@ static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
 static char *wakeup_latency_hist_dir = "wakeup";
 static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
 static notrace void probe_wakeup_latency_hist_start(void *v,
-	struct task_struct *p, int success);
+	struct task_struct *p);
 static notrace void probe_wakeup_latency_hist_stop(void *v,
 	struct task_struct *prev, struct task_struct *next);
 static notrace void probe_sched_migrate_task(void *,
@@ -868,7 +868,7 @@ static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
 }
 
 static notrace void probe_wakeup_latency_hist_start(void *v,
-	struct task_struct *p, int success)
+	struct task_struct *p)
 {
 	unsigned long flags;
 	struct task_struct *curr = current;
diff --git a/localversion-rt b/localversion-rt
index 9969a4b69fad..0c40e2660574 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt111
+-rt112