diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 742f69d18fc8..bcaf28e3b15f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1629,6 +1629,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	ip=		[IP_PNP]
 			See Documentation/filesystems/nfs/nfsroot.txt.
 
+	irqaffinity=	[SMP] Set the default irq affinity mask
+			Format:
+			<cpu number>,...,<cpu number>
+			or
+			<cpu number>-<cpu number>
+			(must be a positive range in ascending order)
+			or a mixture
+			<cpu number>,...,<cpu number>-<cpu number>
+
 	irqfixup	[HW]
 			When an interrupt is not handled search all handlers
 			for it. Intended to get systems with badly broken
diff --git a/arch/Kconfig b/arch/Kconfig
index 81592b37a8f7..3b26d76933fb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -53,6 +53,7 @@ config KPROBES
 config JUMP_LABEL
        bool "Optimize very unlikely/likely branches"
        depends on HAVE_ARCH_JUMP_LABEL
+       depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
        help
         This option enables a transparent branch optimization that
         makes certain almost-always-true or almost-always-false branch
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6430e415b7e6..fa32b9b7721d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 	raw_spin_lock_init(&cpuctx->hrtimer_lock);
 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	timer->function = perf_mux_hrtimer_handler;
+	timer->irqsafe = 1;
 }
 
 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 239e2ae2c947..0b73349a42d5 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -24,10 +24,27 @@
 static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+	cpulist_parse(str, irq_default_affinity);
+	/*
+	 * Set at least the boot cpu. We don't want to end up with
+	 * bug reports caused by random command line masks
+	 */
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+	return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
 static void __init init_irq_default_affinity(void)
 {
-	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!irq_default_affinity)
+		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+	if (cpumask_empty(irq_default_affinity))
+		cpumask_setall(irq_default_affinity);
 }
 #else
 static void __init init_irq_default_affinity(void)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 079a3af04e5e..88995de2ee53 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -241,6 +241,65 @@ struct printk_log {
  */
 static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+static void early_vprintk(const char *fmt, va_list ap)
+{
+	if (early_console) {
+		char buf[512];
+		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+		early_console->write(early_console, buf, n);
+	}
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	early_vprintk(fmt, ap);
+	va_end(ap);
+}
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
+ *
+ * Used by the NMI watchdog if early-printk is enabled.
+ */
+static bool __read_mostly printk_killswitch;
+
+static int __init force_early_printk_setup(char *str)
+{
+	printk_killswitch = true;
+	return 0;
+}
+early_param("force_early_printk", force_early_printk_setup);
+
+void printk_kill(void)
+{
+	printk_killswitch = true;
+}
+
+#ifdef CONFIG_PRINTK
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+	if (!printk_killswitch)
+		return 0;
+	early_vprintk(fmt, ap);
+	return 1;
+}
+#endif
+
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
@@ -1692,62 +1751,6 @@ static size_t cont_print_text(char *text, size_t size)
 	return textlen;
 }
 
-#ifdef CONFIG_EARLY_PRINTK
-struct console *early_console;
-
-static void early_vprintk(const char *fmt, va_list ap)
-{
-	if (early_console) {
-		char buf[512];
-		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-
-		early_console->write(early_console, buf, n);
-	}
-}
-
-asmlinkage void early_printk(const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	early_vprintk(fmt, ap);
-	va_end(ap);
-}
-
-/*
- * This is independent of any log levels - a global
- * kill switch that turns off all of printk.
- *
- * Used by the NMI watchdog if early-printk is enabled.
- */
-static bool __read_mostly printk_killswitch;
-
-static int __init force_early_printk_setup(char *str)
-{
-	printk_killswitch = true;
-	return 0;
-}
-early_param("force_early_printk", force_early_printk_setup);
-
-void printk_kill(void)
-{
-	printk_killswitch = true;
-}
-
-static int forced_early_printk(const char *fmt, va_list ap)
-{
-	if (!printk_killswitch)
-		return 0;
-	early_vprintk(fmt, ap);
-	return 1;
-}
-#else
-static inline int forced_early_printk(const char *fmt, va_list ap)
-{
-	return 0;
-}
-#endif
-
 asmlinkage int vprintk_emit(int facility, int level,
 			    const char *dict, size_t dictlen,
 			    const char *fmt, va_list args)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7a13fbc28454..1217926b500d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3180,7 +3180,6 @@ void migrate_disable(void)
 	preempt_lazy_disable();
 	pin_current_cpu();
 	p->migrate_disable = 1;
-	p->nr_cpus_allowed = 1;
 	preempt_enable();
 }
 EXPORT_SYMBOL(migrate_disable);
diff --git a/localversion-rt b/localversion-rt
index ad3da1bcab7e..0efe7ba1930e 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt4
+-rt5
diff --git a/net/core/dev.c b/net/core/dev.c
index e902501a4d12..c82f163fc174 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2983,7 +2983,7 @@ static inline void xmit_rec_inc(void)
 	__this_cpu_inc(xmit_recursion);
 }
 
-static inline int xmit_rec_dec(void)
+static inline void xmit_rec_dec(void)
 {
 	__this_cpu_dec(xmit_recursion);
 }
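
Two notes on the above, with illustrative code.

First, the irqaffinity= parameter: the kernel parses the value with cpulist_parse() (see the kernel/irq/irqdesc.c hunk), the same cpu-list grammar used by isolcpus= and nohz_full=, and irq_affinity_setup() then forces the boot CPU into the result so a bogus command line cannot leave the default mask empty. As a rough illustration of the accepted grammar, here is a minimal standalone C sketch; the helper name, the 64-CPU cap and the flat 64-bit mask are stand-ins for the kernel's cpumask machinery, not its implementation:

/*
 * Standalone sketch of the cpu-list grammar accepted by irqaffinity=
 * ("0,4", "0-3", "0,4-7,12"). Approximates cpulist_parse(); it is NOT
 * the kernel code, and does no validation beyond range checks.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 64	/* illustrative cap, not the kernel's config */

/* Parse "a,b-c,..." into a bitmask; 0 on success, -1 on error. */
static int cpulist_parse_sketch(const char *str, unsigned long long *mask)
{
	char *dup = strdup(str), *tok, *save = NULL;

	*mask = 0;
	if (!dup)
		return -1;

	for (tok = strtok_r(dup, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		char *dash = strchr(tok, '-');
		long lo = strtol(tok, NULL, 10);
		long hi = dash ? strtol(dash + 1, NULL, 10) : lo;

		/* ranges must be ascending and within bounds */
		if (lo < 0 || hi < lo || hi >= NR_CPUS) {
			free(dup);
			return -1;
		}
		while (lo <= hi)
			*mask |= 1ULL << lo++;
	}
	free(dup);
	return 0;
}

int main(void)
{
	unsigned long long mask;

	if (cpulist_parse_sketch("0,4-7,12", &mask) == 0)
		printf("mask = %#llx\n", mask);	/* prints 0x10f1 */
	return 0;
}

Booting with e.g. irqaffinity=0-3 makes the resulting default mask visible in /proc/irq/default_smp_affinity; interrupts set up afterwards inherit it, and individual IRQs can still be retargeted via /proc/irq/<nr>/smp_affinity.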
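
Second, the kernel/printk/printk.c change is a pure move: the early_printk/kill-switch block is hoisted above the main #ifdef CONFIG_PRINTK section, so early_console and early_printk() are available even with CONFIG_PRINTK=n, while forced_early_printk() gains its own CONFIG_PRINTK guard, presumably because only the regular printk path consumes it. The pattern itself is a global flag that, once set by printk_kill() (the NMI watchdog hook) or the force_early_printk boot option, reroutes every message synchronously to the early console instead of the lock-taking log-buffer path. A standalone userspace demo of that pattern; names mirror the patch, and printk_demo() is a hypothetical stand-in for vprintk_emit():

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool printk_killswitch;

/* Synchronous, lockless writer - stands in for early_console->write(). */
static void early_vprintk(const char *fmt, va_list ap)
{
	char buf[512];
	int n = vsnprintf(buf, sizeof(buf), fmt, ap);

	/* clamp; the kernel uses vscnprintf(), which already truncates */
	if (n > (int)sizeof(buf) - 1)
		n = sizeof(buf) - 1;
	write(STDERR_FILENO, buf, n);
}

/* Returns 1 if the message was diverted to the early console. */
static int forced_early_printk(const char *fmt, va_list ap)
{
	if (!printk_killswitch)
		return 0;
	early_vprintk(fmt, ap);
	return 1;
}

/* Stand-in for vprintk_emit(): try the kill switch before the slow path. */
static void printk_demo(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (!forced_early_printk(fmt, ap))
		vprintf(fmt, ap);	/* normal, buffered path */
	va_end(ap);
}

int main(void)
{
	printk_demo("normal path\n");
	printk_killswitch = true;	/* what printk_kill() does */
	printk_demo("rerouted to the early console\n");
	return 0;
}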