diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index a104cf91e6b90..d40708e8c5d6e 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,14 +472,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 				     smp_call_func_t func)
 {
-	int cpu;
 	struct rcu_node *rnp;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
 	sync_exp_reset_tree(rsp);
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
-	cpus_read_lock();
 
 	/* Schedule work for each leaf rcu_node structure. */
 	rcu_for_each_leaf_node(rsp, rnp) {
 		rnp->exp_need_flush = false;
@@ -494,11 +492,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
-		/* If all offline, queue the work on an unbound CPU. */
-		if (unlikely(cpu > rnp->grphi))
-			cpu = WORK_CPU_UNBOUND;
-		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
 		rnp->exp_need_flush = true;
 	}
 
@@ -506,7 +500,6 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	rcu_for_each_leaf_node(rsp, rnp)
 		if (rnp->exp_need_flush)
 			flush_work(&rnp->rew.rew_work);
-	cpus_read_unlock();
 }
 
 static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
diff --git a/localversion-rt b/localversion-rt
index 700c857efd9ba..22746d6390a42 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt8
+-rt9