Name: Centralize Calls to set_task_cpu() on Fork
Status: Booted on 2.6.6-rc2-mm2
Depends:
Version: -mm

copy_process copies the ->cpu field from the parent in dup_task_struct
(the thread_info assignment).  Setting it again is redundant, except in
the cpu hotplug case (where the cpu might have gone away while
copy_process was preempted, and the cleanup code won't have found this
task, which isn't in the task list yet).

So only do the set_task_cpu() when actually moving the task, and check
for offlining in copy_process() just before inserting it into the task
list.

The change to wake_up_forked_thread() is a little subtle: it currently
compares the current cpu with the others.  This changes it to compare
the cpu the task is currently on (usually the same, but the two can
differ if we have been preempted).

diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .4889-linux-2.6.6-rc2-mm2/kernel/fork.c .4889-linux-2.6.6-rc2-mm2.updated/kernel/fork.c
--- .4889-linux-2.6.6-rc2-mm2/kernel/fork.c	2004-04-29 20:33:42.000000000 +1000
+++ .4889-linux-2.6.6-rc2-mm2.updated/kernel/fork.c	2004-04-29 20:33:43.000000000 +1000
@@ -1084,6 +1084,11 @@ struct task_struct *copy_process(unsigne
 		spin_unlock(&current->sighand->siglock);
 	}
 
+	/* CPU might have gone down while we were preempted: put on
+	 * live one. */
+	if (cpu_is_offline(task_cpu(p)))
+		set_task_cpu(p, smp_processor_id());
+
 	SET_LINKS(p);
 	if (p->ptrace & PT_PTRACED)
 		__ptrace_link(p, current->parent);
@@ -1224,13 +1229,7 @@ long do_fork(unsigned long clone_flags,
 			else
 				wake_up_forked_process(p);
 		} else {
-			int cpu = get_cpu();
-
 			p->state = TASK_STOPPED;
-			if (unlikely(cpu_is_offline(task_cpu(p))))
-				set_task_cpu(p, cpu);
-
-			put_cpu();
 		}
 		++total_forks;
 
diff -urpN --exclude TAGS -X /home/rusty/devel/kernel/kernel-patches/current-dontdiff --minimal .4889-linux-2.6.6-rc2-mm2/kernel/sched.c .4889-linux-2.6.6-rc2-mm2.updated/kernel/sched.c
--- .4889-linux-2.6.6-rc2-mm2/kernel/sched.c	2004-04-29 20:33:42.000000000 +1000
+++ .4889-linux-2.6.6-rc2-mm2.updated/kernel/sched.c	2004-04-29 20:35:05.000000000 +1000
@@ -1072,7 +1072,6 @@ void fastcall wake_up_forked_process(tas
 	p->interactive_credit = 0;
 
 	p->prio = effective_prio(p);
-	set_task_cpu(p, smp_processor_id());
 
 	if (unlikely(!current->array))
 		__activate_task(p, rq);
@@ -1343,29 +1342,30 @@ static int find_idlest_cpu(struct task_s
  * that must be done for every newly created context, and it also does
  * runqueue balancing.
  */
-void fastcall wake_up_forked_thread(task_t * p)
+void fastcall wake_up_forked_thread(task_t *p)
 {
 	unsigned long flags;
-	int this_cpu = get_cpu(), cpu;
+	int old_cpu = task_cpu(p), cpu;
 	struct sched_domain *tmp, *sd = NULL;
-	runqueue_t *this_rq = cpu_rq(this_cpu), *rq;
+	runqueue_t *old_rq = cpu_rq(old_cpu), *rq;
 
+	preempt_disable();	/* No cpus can go down here. */
 	/*
 	 * Find the largest domain that this CPU is part of that
 	 * is willing to balance on clone:
 	 */
-	for_each_domain(this_cpu, tmp)
+	for_each_domain(old_cpu, tmp)
 		if (tmp->flags & SD_BALANCE_CLONE)
 			sd = tmp;
 	if (sd)
-		cpu = find_idlest_cpu(p, this_cpu, sd);
+		cpu = find_idlest_cpu(p, old_cpu, sd);
 	else
-		cpu = this_cpu;
+		cpu = old_cpu;
 
 	local_irq_save(flags);
 lock_again:
 	rq = cpu_rq(cpu);
-	double_rq_lock(this_rq, rq);
+	double_rq_lock(old_rq, rq);
 
 	BUG_ON(p->state != TASK_RUNNING);
 
@@ -1375,8 +1375,8 @@ lock_again:
 	 * in this case:
 	 */
 	if (unlikely(!cpu_isset(cpu, p->cpus_allowed))) {
-		cpu = this_cpu;
-		double_rq_unlock(this_rq, rq);
+		cpu = old_cpu;
+		double_rq_unlock(old_rq, rq);
 		goto lock_again;
 	}
 	/*
@@ -1393,9 +1393,8 @@ lock_again:
 	p->interactive_credit = 0;
 
 	p->prio = effective_prio(p);
-	set_task_cpu(p, cpu);
 
-	if (cpu == this_cpu) {
+	if (cpu == old_cpu) {
 		if (unlikely(!current->array))
 			__activate_task(p, rq);
 		else {
@@ -1406,15 +1405,16 @@ lock_again:
 			rq->nr_running++;
 		}
 	} else {
+		set_task_cpu(p, cpu);
 		schedstat_inc(sd, sbc_pushed);
 		__activate_task(p, rq);
 		if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
 	}
 
-	double_rq_unlock(this_rq, rq);
+	double_rq_unlock(old_rq, rq);
 	local_irq_restore(flags);
-	put_cpu();
+	preempt_enable();
 }
 
 /*
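
For readers without the tree handy, the ordering that the fork.c hunk
depends on can be modelled in a few lines of plain user-space C.  The
sketch below is illustrative only (the struct, the cpu_online[] array
and the model_* helpers are invented for the example, not kernel APIs);
it shows why the offline check has to sit after any preemptible window
and before the task becomes visible on the task list.

/*
 * Stand-alone user-space model of the fork/hotplug ordering.  None of
 * this is kernel code: the types and helper names are made up here.
 */
#include <stdio.h>

#define NR_CPUS 4

static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };

struct task {
	int cpu;	/* models the cpu value read by task_cpu() */
};

static int model_cpu_is_offline(int cpu)
{
	return !cpu_online[cpu];
}

/* Models dup_task_struct(): the child simply inherits the parent's cpu. */
static void model_dup_task(struct task *child, const struct task *parent)
{
	child->cpu = parent->cpu;
}

int main(void)
{
	int this_cpu = 0;		/* stand-in for smp_processor_id() */
	struct task parent = { .cpu = 3 };
	struct task child;

	model_dup_task(&child, &parent);

	/* Pretend copy_process() was preempted here and cpu 3 was
	 * hot-unplugged before the child was linked into the task list,
	 * so the hotplug cleanup never saw it. */
	cpu_online[3] = 0;

	/* The check added just before SET_LINKS(): fix up the cpu only
	 * if it went away, instead of unconditionally calling
	 * set_task_cpu() in the wake-up paths. */
	if (model_cpu_is_offline(child.cpu))
		child.cpu = this_cpu;

	printf("child will run on cpu %d\n", child.cpu);
	return 0;
}

Compile it with any C compiler; with the hot-unplug line active the
child ends up on cpu 0, and with it commented out the inherited cpu 3
is kept, which is exactly the decision the new check makes.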