Index: linux-3.2/include/linux/rcutree.h
===================================================================
--- linux-3.2.orig/include/linux/rcutree.h
+++ linux-3.2/include/linux/rcutree.h
@@ -60,7 +60,7 @@ static inline void exit_rcu(void)
 #ifndef CONFIG_PREEMPT_RT_FULL
 extern void synchronize_rcu_bh(void);
 #else
-# define synchronize_rcu_bh() synchronize_rcu()
+# define synchronize_rcu_bh synchronize_rcu
 #endif
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
Index: linux-3.2/init/Kconfig
===================================================================
--- linux-3.2.orig/init/Kconfig
+++ linux-3.2/init/Kconfig
@@ -410,7 +410,7 @@ config TINY_RCU

 config TINY_PREEMPT_RCU
 	bool "Preemptible UP-only small-memory-footprint RCU"
-	depends on PREEMPT && !SMP && !PREEMPT_RT_FULL
+	depends on PREEMPT && !SMP
 	help
 	  This option selects the RCU implementation that is designed for
 	  real-time UP systems.  This option greatly reduces the
Index: linux-3.2/kernel/softirq.c
===================================================================
--- linux-3.2.orig/kernel/softirq.c
+++ linux-3.2/kernel/softirq.c
@@ -447,6 +447,7 @@ int in_serving_softirq(void)
 	preempt_enable();
 	return res;
 }
+EXPORT_SYMBOL(in_serving_softirq);

 /*
  * Called with bh and local interrupts disabled. For full RT cpu must
Index: linux-3.2/localversion-rt
===================================================================
--- linux-3.2.orig/localversion-rt
+++ linux-3.2/localversion-rt
@@ -1 +1 @@
--rt1
+-rt2
Index: linux-3.2/include/linux/sysctl.h
===================================================================
--- linux-3.2.orig/include/linux/sysctl.h
+++ linux-3.2/include/linux/sysctl.h
@@ -932,6 +932,7 @@ enum
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>

 /* For the /proc/sys support */
 struct ctl_table;
Index: linux-3.2/kernel/rcutiny.c
===================================================================
--- linux-3.2.orig/kernel/rcutiny.c
+++ linux-3.2/kernel/rcutiny.c
@@ -243,6 +243,7 @@ void call_rcu_sched(struct rcu_head *hea
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);

+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Post an RCU bottom-half callback to be invoked after any subsequent
  * quiescent state.
@@ -252,3 +253,4 @@ void call_rcu_bh(struct rcu_head *head,
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
Index: linux-3.2/arch/x86/crypto/aesni-intel_glue.c
===================================================================
--- linux-3.2.orig/arch/x86/crypto/aesni-intel_glue.c
+++ linux-3.2/arch/x86/crypto/aesni-intel_glue.c
@@ -289,14 +289,14 @@ static int ecb_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK);
+				nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();

 	return err;
 }
@@ -313,14 +313,14 @@ static int ecb_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();

 	return err;
 }
@@ -359,14 +359,14 @@ static int cbc_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();

 	return err;
 }
@@ -383,14 +383,14 @@ static int cbc_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();

 	return err;
 }
@@ -445,18 +445,20 @@ static int ctr_crypt(struct blkcipher_de
 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = blkcipher_walk_done(desc, &walk, 0);
 	}
-	kernel_fpu_end();

 	return err;
 }
Index: linux-3.2/drivers/md/dm.c
===================================================================
--- linux-3.2.orig/drivers/md/dm.c
+++ linux-3.2/drivers/md/dm.c
@@ -1648,14 +1648,14 @@ static void dm_request_fn(struct request
 		if (map_request(ti, clone, md))
 			goto requeued;

-		BUG_ON(!irqs_disabled());
+		BUG_ON_NORT(!irqs_disabled());
 		spin_lock(q->queue_lock);
 	}

 	goto out;

 requeued:
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NORT(!irqs_disabled());
 	spin_lock(q->queue_lock);

 delay_and_out:
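
Note on the aesni-intel_glue.c hunks: they move kernel_fpu_begin()/kernel_fpu_end()
from around the whole blkcipher walk to around each individual SIMD call, so the
FPU section (which disables preemption, a problem on PREEMPT_RT_FULL) covers only
one batch of blocks at a time and blkcipher_walk_done() runs preemptibly. The sketch
below is illustrative only: my_cipher_ctx, my_simd_encrypt() and my_ecb_encrypt()
are hypothetical stand-ins for the driver's context type and assembler routines,
not symbols from the patch.

/* Minimal sketch of the per-iteration FPU pattern, assuming a 3.2-era blkcipher. */
#include <linux/crypto.h>
#include <linux/linkage.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <asm/i387.h>		/* kernel_fpu_begin()/kernel_fpu_end() on 3.2 x86 */

struct my_cipher_ctx {		/* hypothetical per-tfm context */
	u32 key_enc[60];
};

/* Stand-in for a SIMD assembler routine such as aesni_ecb_enc(). */
asmlinkage void my_simd_encrypt(struct my_cipher_ctx *ctx, u8 *out,
				const u8 *in, unsigned int len);

static int my_ecb_encrypt(struct blkcipher_desc *desc,
			  struct scatterlist *dst, struct scatterlist *src,
			  unsigned int nbytes)
{
	struct my_cipher_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	while ((nbytes = walk.nbytes)) {
		/* Hold the FPU (and thus preemption) only for the SIMD call. */
		kernel_fpu_begin();
		my_simd_encrypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				nbytes & ~(AES_BLOCK_SIZE - 1));
		kernel_fpu_end();
		/* blkcipher_walk_done() may sleep, so it runs outside. */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}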