diff -urN linux-2.2.7/include/linux/sched.h linux/include/linux/sched.h
--- linux-2.2.7/include/linux/sched.h	Sat Apr 17 07:28:10 1999
+++ linux/include/linux/sched.h	Thu May  6 09:47:03 1999
@@ -237,6 +237,7 @@
 	int last_processor;
 	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
 	struct task_struct *next_task, *prev_task;
+	struct task_struct *run_head;	/* The run queue I am destined for */
 	struct task_struct *next_run, *prev_run;
 
 /* task state */
@@ -349,7 +350,7 @@
 /* state etc */	{ 0,0,0,KERNEL_DS,&default_exec_domain,0, \
 /* counter */	DEF_PRIORITY,DEF_PRIORITY,0, \
 /* SMP */	0,0,0,-1, \
-/* schedlink */	&init_task,&init_task, &init_task, &init_task, \
+/* schedlink */	&init_task,&init_task, &init_task, &init_task, &init_task, \
 /* binfmt */	NULL, \
 /* ec,brk... */	0,0,0,0,0,0, \
 /* pid etc.. */	0,0,0,0,0, \
diff -urN linux-2.2.7/kernel/sched.c linux/kernel/sched.c
--- linux-2.2.7/kernel/sched.c	Tue Mar 16 11:11:55 1999
+++ linux/kernel/sched.c	Thu May  6 10:10:39 1999
@@ -96,6 +96,17 @@
 
 struct task_struct * task[NR_TASKS] = {&init_task, };
 
+/* Pseudo-idle task for RT run queue. I'm not entirely happy with allocating
+   a whole task structure, but this has the advantage of avoiding an extra
+   level of indirection in the scheduler inner loops. I may get around to
+   doing a partial allocation here (evil). rgooch@atnf.csiro.au
+*/
+static struct task_struct rt_idle = {
+	rt_priority:	-1000,
+	prev_run:	&rt_idle,
+	next_run:	&rt_idle,
+};
+
 struct kernel_stat kstat = { 0 };
 
 void scheduling_functions_start_here(void) { }
@@ -227,10 +238,11 @@
  */
 static inline void add_to_runqueue(struct task_struct * p)
 {
-	struct task_struct *next = init_task.next_run;
+	struct task_struct *head = p->run_head;
+	struct task_struct *next = head->next_run;
 
-	p->prev_run = &init_task;
-	init_task.next_run = p;
+	p->prev_run = head;
+	head->next_run = p;
 	p->next_run = next;
 	next->prev_run = p;
 	nr_running++;
@@ -250,6 +262,7 @@
 
 static inline void move_last_runqueue(struct task_struct * p)
 {
+	struct task_struct *head = p->run_head;
 	struct task_struct *next = p->next_run;
 	struct task_struct *prev = p->prev_run;
 
@@ -257,15 +270,16 @@
 	next->prev_run = prev;
 	prev->next_run = next;
 	/* add back to list */
-	p->next_run = &init_task;
-	prev = init_task.prev_run;
-	init_task.prev_run = p;
+	p->next_run = head;
+	prev = head->prev_run;
+	head->prev_run = p;
 	p->prev_run = prev;
 	prev->next_run = p;
 }
 
 static inline void move_first_runqueue(struct task_struct * p)
 {
+	struct task_struct *head = p->run_head;
 	struct task_struct *next = p->next_run;
 	struct task_struct *prev = p->prev_run;
 
@@ -273,9 +287,9 @@
 	next->prev_run = prev;
 	prev->next_run = next;
 	/* add back to list */
-	p->prev_run = &init_task;
-	next = init_task.next_run;
-	init_task.next_run = p;
+	p->prev_run = head;
+	next = head->next_run;
+	head->next_run = p;
 	p->next_run = next;
 	next->prev_run = p;
 }
@@ -347,14 +361,6 @@
 	}
 
 	/*
-	 * Realtime process, select the first one on the
-	 * runqueue (taking priorities within processes
-	 * into account).
-	 */
-	if (policy != SCHED_OTHER)
-		return 1000 + p->rt_priority;
-
-	/*
 	 * Give the process a first-approximation goodness value
 	 * according to the number of clock-ticks it has left.
 	 *
@@ -693,7 +699,8 @@
 
 /* this is the scheduler proper: */
 {
-	struct task_struct * p = init_task.next_run;
+	struct task_struct * p = rt_idle.next_run;
+	struct task_struct * op = init_task.next_run;
 	int c = -1000;
 
 	/* Default process to select.. */
@@ -719,6 +726,16 @@
 	 * interrupts are enabled. However, they will be put on front of the
 	 * list, so our list starting at "p" is essentially fixed.
 	 */
+	/* First scan for RT process to run: ignore others */
+	while (p != &rt_idle) {
+		if (can_schedule(p)) {
+			int weight = p->rt_priority;	/* the candidate's priority, not prev's */
+			if (weight > c)
+				c = weight, next = p;
+		}
+		p = p->next_run;
+	}
+	p = (next == idle_task) ? op : &init_task;
 	while (p != &init_task) {
 		if (can_schedule(p)) {
 			int weight = goodness(p, prev, this_cpu);
@@ -1609,7 +1626,7 @@
 	struct sched_param *param)
 {
 	struct sched_param lp;
-	struct task_struct *p;
+	struct task_struct *p, *head;
 	int retval;
 
 	retval = -EINVAL;
@@ -1663,8 +1680,13 @@
 	retval = 0;
 	p->policy = policy;
 	p->rt_priority = lp.sched_priority;
-	if (p->next_run)
-		move_first_runqueue(p);
+	head = (policy == SCHED_OTHER) ? &init_task : &rt_idle;
+	if (p->next_run) {
+		del_from_runqueue(p);
+		p->run_head = head;
+		add_to_runqueue(p);	/* add/del adjust nr_running themselves */
+	}
+	else p->run_head = head;
 
 	current->need_resched = 1;