diff -urN 2.3.42aa3/kernel/sched.c sched_smp/kernel/sched.c
--- 2.3.42aa3/kernel/sched.c	Sat Feb  5 17:13:02 2000
+++ sched_smp/kernel/sched.c	Sat Feb  5 17:06:12 2000
@@ -199,30 +199,17 @@
 		goto send_now;
 
 	/*
-	 * The only heuristics - we use the tsk->avg_slice value
-	 * to detect 'frequent reschedulers'.
-	 *
-	 * If both the woken-up process and the preferred CPU is
-	 * is a frequent rescheduler, then skip the asynchronous
-	 * wakeup, the frequent rescheduler will likely chose this
-	 * task during it's next schedule():
-	 */
-	if (p->policy == SCHED_OTHER) {
-		tsk = cpu_curr(best_cpu);
-		if (p->avg_slice + tsk->avg_slice < cacheflush_time)
-			goto out_no_target;
-	}
-
-	/*
 	 * We know that the preferred CPU has a cache-affine current
 	 * process, lets try to find a new idle CPU for the woken-up
 	 * process:
 	 */
-	for (i = 0; i < smp_num_cpus; i++) {
+	for (i = smp_num_cpus - 1; i >= 0; i--) {
 		cpu = cpu_logical_map(i);
+		if (cpu == best_cpu)
+			continue;
 		tsk = cpu_curr(cpu);
 		/*
-		 * We use the first available idle CPU. This creates
+		 * We use the last available idle CPU. This creates
 		 * a priority list between idle CPUs, but this is not
 		 * a problem.
 		 */
@@ -232,26 +219,32 @@
 
 	/*
 	 * No CPU is idle, but maybe this process has enough priority
-	 * to preempt it's preferred CPU. (this is a shortcut):
+	 * to preempt its preferred CPU.
 	 */
 	tsk = cpu_curr(best_cpu);
 	if (preemption_goodness(tsk, p, best_cpu) > 0)
 		goto send_now;
 
 	/*
-	 * We should get here rarely - or in the high CPU contention
+	 * We will get here often - or in the high CPU contention
 	 * case. No CPU is idle and this process is either lowprio or
-	 * the preferred CPU is highprio. Maybe some other CPU can/must
-	 * be preempted:
+	 * the preferred CPU is highprio. Try to preempt some other CPU
+	 * only if it's RT or if it's interactive and the preferred
+	 * CPU won't reschedule shortly.
 	 */
-	for (i = 0; i < smp_num_cpus; i++) {
-		cpu = cpu_logical_map(i);
-		tsk = cpu_curr(cpu);
-		if (preemption_goodness(tsk, p, cpu) > 0)
-			goto send_now;
+	if ((p->avg_slice < cacheflush_time && cpu_curr(best_cpu)->avg_slice > cacheflush_time) ||
+	    p->policy != SCHED_OTHER)
+	{
+		for (i = smp_num_cpus - 1; i >= 0; i--) {
+			cpu = cpu_logical_map(i);
+			if (cpu == best_cpu)
+				continue;
+			tsk = cpu_curr(cpu);
+			if (preemption_goodness(tsk, p, cpu) > 0)
+				goto send_now;
+		}
 	}
 
-out_no_target:
 	spin_unlock_irqrestore(&runqueue_lock, flags);
 	return;
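
To make the behavioural change easier to follow outside the diff context, here is a minimal
sketch of the wake-up target selection the patched code ends up performing. It is not the
kernel's actual reschedule_idle(): the helper name pick_target_cpu() is made up, locking and
the send_now/IPI path are omitted, and the idle test via idle_task() is an assumption about
the surrounding (unchanged) code. Only the decision order is meant to mirror the patch.

	/*
	 * Hedged sketch (not the real kernel code) of the wake-up CPU
	 * selection after this patch.  pick_target_cpu() is hypothetical;
	 * cpu_curr(), cpu_logical_map(), idle_task(), preemption_goodness(),
	 * avg_slice and cacheflush_time are the existing scheduler symbols.
	 */
	static int pick_target_cpu(struct task_struct *p, int best_cpu)
	{
		struct task_struct *tsk;
		int i, cpu;

		/*
		 * 1) Look for an idle CPU, scanning from the highest logical
		 *    CPU down and never considering best_cpu: its current
		 *    process is cache-affine.  The "last" idle CPU in logical
		 *    order wins.
		 */
		for (i = smp_num_cpus - 1; i >= 0; i--) {
			cpu = cpu_logical_map(i);
			if (cpu == best_cpu)
				continue;
			if (cpu_curr(cpu) == idle_task(cpu))
				return cpu;
		}

		/*
		 * 2) No idle CPU: see whether p is good enough to preempt the
		 *    current process on its preferred CPU.
		 */
		if (preemption_goodness(cpu_curr(best_cpu), p, best_cpu) > 0)
			return best_cpu;

		/*
		 * 3) Preempt some *other* CPU only for RT tasks, or for an
		 *    interactive task (short average timeslice) whose
		 *    preferred CPU won't reschedule before the cache would
		 *    have gone cold anyway.
		 */
		if (p->policy != SCHED_OTHER ||
		    (p->avg_slice < cacheflush_time &&
		     cpu_curr(best_cpu)->avg_slice > cacheflush_time)) {
			for (i = smp_num_cpus - 1; i >= 0; i--) {
				cpu = cpu_logical_map(i);
				if (cpu == best_cpu)
					continue;
				tsk = cpu_curr(cpu);
				if (preemption_goodness(tsk, p, cpu) > 0)
					return cpu;
			}
		}

		return -1;	/* nobody to kick; p just waits on its runqueue */
	}

Compared to the pre-patch logic, the avg_slice "frequent rescheduler" shortcut (the
out_no_target label) is gone, the idle scan runs in the opposite direction and skips
best_cpu, and the cross-CPU preemption scan is gated on the RT/interactive condition
instead of being tried unconditionally.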