diff -urpN wli-2.6.0-test8-41/arch/i386/kernel/process.c wli-2.6.0-test8-41-wchan/arch/i386/kernel/process.c
--- wli-2.6.0-test8-41/arch/i386/kernel/process.c	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/arch/i386/kernel/process.c	2003-11-09 07:07:39.000000000 -0800
@@ -649,10 +649,6 @@ out:
 /*
  * These bracket the sleeping functions..
  */
-void scheduling_functions_start_here(void);
-void scheduling_functions_end_here(void);
-#define first_sched	((unsigned long) scheduling_functions_start_here)
-#define last_sched	((unsigned long) scheduling_functions_end_here)
 #define top_esp                (THREAD_SIZE - sizeof(unsigned long))
 #define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
 
@@ -673,15 +669,14 @@ unsigned long get_wchan(task_t *task)
 		if (ebp < stack_page || ebp > top_ebp + stack_page)
 			return 0;
 		eip = *(unsigned long *) (ebp+4);
-		if (eip < first_sched || eip >= last_sched)
+		if (eip < scheduling_functions_start_here ||
+				eip >= scheduling_functions_end_here)
 			return eip;
 		ebp = *(unsigned long *) ebp;
 	} while (count++ < 16);
 	return 0;
 }
 EXPORT_SYMBOL(get_wchan);
-#undef last_sched
-#undef first_sched
 
 /*
  * sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff -urpN wli-2.6.0-test8-41/arch/i386/kernel/vmlinux.lds.S wli-2.6.0-test8-41-wchan/arch/i386/kernel/vmlinux.lds.S
--- wli-2.6.0-test8-41/arch/i386/kernel/vmlinux.lds.S	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/arch/i386/kernel/vmlinux.lds.S	2003-11-09 06:57:23.000000000 -0800
@@ -15,6 +15,9 @@ SECTIONS
   _text = .;			/* Text and read-only data */
   .text : {
 	*(.text)
+	__scheduling_functions_start_here = .;
+	*(.text.sched)
+	__scheduling_functions_end_here = .;
 	*(.fixup)
 	*(.gnu.warning)
 	} = 0x9090
diff -urpN wli-2.6.0-test8-41/include/linux/init.h wli-2.6.0-test8-41-wchan/include/linux/init.h
--- wli-2.6.0-test8-41/include/linux/init.h	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/include/linux/init.h	2003-11-09 06:55:35.000000000 -0800
@@ -45,6 +45,8 @@
 #define __exitdata	__attribute__ ((__section__(".exit.data")))
 #define __exit_call	__attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
 
+#define __sched	__attribute__((__section__(".text.sched")))
+
 #ifdef MODULE
 #define __exit		__attribute__ ((__section__(".exit.text")))
 #else
diff -urpN wli-2.6.0-test8-41/include/linux/sched.h wli-2.6.0-test8-41-wchan/include/linux/sched.h
--- wli-2.6.0-test8-41/include/linux/sched.h	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/include/linux/sched.h	2003-11-09 07:05:26.000000000 -0800
@@ -170,6 +170,8 @@ void update_one_process(task_t *p, unsig
 			unsigned long system, int cpu);
 void scheduler_tick(int user_tick, int system);
 extern unsigned long cache_decay_ticks;
+extern const unsigned long scheduling_functions_start_here;
+extern const unsigned long scheduling_functions_end_here;
 
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 
diff -urpN wli-2.6.0-test8-41/kernel/sched.c wli-2.6.0-test8-41-wchan/kernel/sched.c
--- wli-2.6.0-test8-41/kernel/sched.c	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/kernel/sched.c	2003-11-09 07:05:58.000000000 -0800
@@ -221,6 +221,13 @@ static DEFINE_PER_CPU(struct runqueue, r
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+extern unsigned long __scheduling_functions_start_here;
+extern unsigned long __scheduling_functions_end_here;
+const unsigned long scheduling_functions_start_here =
+	(unsigned long)&__scheduling_functions_start_here;
+const unsigned long scheduling_functions_end_here =
+	(unsigned long)&__scheduling_functions_end_here;
+
 /*
  * Default context-switch locking:
  */
@@ -1448,12 +1455,10 @@ out:
 	rebalance_tick(rq, 0);
 }
 
-void scheduling_functions_start_here(void) { }
-
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void schedule(void)
+asmlinkage __sched void schedule(void)
 {
 	task_t *prev, *next;
 	runqueue_t *rq;
@@ -1596,7 +1601,7 @@ EXPORT_SYMBOL(schedule);
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void preempt_schedule(void)
+asmlinkage __sched void preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
 
@@ -1621,7 +1626,7 @@ need_resched:
 EXPORT_SYMBOL(preempt_schedule);
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
+__sched int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
 {
 	task_t *p = curr->task;
 	return try_to_wake_up(p, mode, sync, 0);
@@ -1638,7 +1643,7 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static __sched void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
 {
 	struct list_head *tmp, *next;
 
@@ -1660,7 +1665,7 @@ static void __wake_up_common(wait_queue_
  * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+__sched void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
 
@@ -1692,7 +1697,7 @@ void __wake_up_locked(wait_queue_head_t 
 *
 * On UP it can prevent extra preemption.
 */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+__sched void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
 
@@ -1709,7 +1714,7 @@ void __wake_up_sync(wait_queue_head_t *q
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
-void complete(struct completion *x)
+__sched void complete(struct completion *x)
 {
 	unsigned long flags;
 
@@ -1721,7 +1726,7 @@ void complete(struct completion *x)
 
 EXPORT_SYMBOL(complete);
 
-void complete_all(struct completion *x)
+__sched void complete_all(struct completion *x)
 {
 	unsigned long flags;
 
@@ -1731,7 +1736,7 @@ void complete_all(struct completion *x)
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
-void wait_for_completion(struct completion *x)
+__sched void wait_for_completion(struct completion *x)
 {
 	might_sleep();
 	spin_lock_irq(&x->wait.lock);
@@ -1769,7 +1774,7 @@ EXPORT_SYMBOL(wait_for_completion);
 	__remove_wait_queue(q, &wait);			\
 	spin_unlock_irqrestore(&q->lock, flags);
 
-void interruptible_sleep_on(wait_queue_head_t *q)
+__sched void interruptible_sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
@@ -1782,7 +1787,7 @@ void interruptible_sleep_on(wait_queue_h
 
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+__sched long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
 
@@ -1797,7 +1802,7 @@ long interruptible_sleep_on_timeout(wait
 
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void sleep_on(wait_queue_head_t *q)
+__sched void sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
@@ -1810,7 +1815,7 @@ void sleep_on(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(sleep_on);
 
-long sleep_on_timeout(wait_queue_head_t *q, long timeout)
+__sched long sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
 
@@ -1825,8 +1830,6 @@ long sleep_on_timeout(wait_queue_head_t 
 
 EXPORT_SYMBOL(sleep_on_timeout);
 
-void scheduling_functions_end_here(void) { }
-
 void set_user_nice(task_t *p, long nice)
 {
 	unsigned long flags;
@@ -2285,7 +2288,7 @@ asmlinkage long sys_sched_yield(void)
 	return 0;
 }
 
-void __cond_resched(void)
+__sched void __cond_resched(void)
 {
 	set_current_state(TASK_RUNNING);
 	schedule();
@@ -2299,7 +2302,7 @@ EXPORT_SYMBOL(__cond_resched);
 * this is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
-void yield(void)
+__sched void yield(void)
 {
 	set_current_state(TASK_RUNNING);
 	sys_sched_yield();
@@ -2314,7 +2317,7 @@ EXPORT_SYMBOL(yield);
 * But don't do that if it is a deliberate, throttling IO wait (this task
 * has set its backing_dev_info: the queue against which it should throttle)
 */
-void io_schedule(void)
+__sched void io_schedule(void)
 {
 	struct runqueue *rq = this_rq();
 
@@ -2325,7 +2328,7 @@ void io_schedule(void)
 
 EXPORT_SYMBOL(io_schedule);
 
-long io_schedule_timeout(long timeout)
+__sched long io_schedule_timeout(long timeout)
 {
 	struct runqueue *rq = this_rq();
 	long ret;
@@ -2875,7 +2878,7 @@ EXPORT_SYMBOL(__might_sleep);
 *
 * Called inside preempt_disable().
 */
-void __preempt_spin_lock(spinlock_t *lock)
+__sched void __preempt_spin_lock(spinlock_t *lock)
 {
 	if (preempt_count() > 1) {
 		_raw_spin_lock(lock);
@@ -2891,7 +2894,7 @@ void __preempt_spin_lock(spinlock_t *loc
 
 EXPORT_SYMBOL(__preempt_spin_lock);
 
-void __preempt_write_lock(rwlock_t *lock)
+__sched void __preempt_write_lock(rwlock_t *lock)
 {
 	if (preempt_count() > 1) {
 		_raw_write_lock(lock);
diff -urpN wli-2.6.0-test8-41/kernel/timer.c wli-2.6.0-test8-41-wchan/kernel/timer.c
--- wli-2.6.0-test8-41/kernel/timer.c	2003-10-28 21:24:16.000000000 -0800
+++ wli-2.6.0-test8-41-wchan/kernel/timer.c	2003-11-09 07:02:48.000000000 -0800
@@ -993,7 +993,7 @@ static void process_timeout(unsigned lon
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
-signed long schedule_timeout(signed long timeout)
+__sched signed long schedule_timeout(signed long timeout)
 {
 	struct timer_list timer;
 	unsigned long expire;
@@ -1054,7 +1054,7 @@ asmlinkage long sys_gettid(void)
 }
 
 #ifndef FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
-static long nanosleep_restart(struct restart_block *restart)
+static __sched long nanosleep_restart(struct restart_block *restart)
 {
 	unsigned long expire = restart->arg0, now = jiffies;
 	struct timespec *rmtp = (struct timespec *) restart->arg1;
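
For readers who have not seen the linker-section trick before, here is a minimal user-space sketch of the same idea; it is illustrative only and not part of the patch. The section name demo_sched_text and the helpers fake_schedule()/in_sched_text() are made up, and instead of editing a linker script it relies on GNU ld automatically emitting __start_<section>/__stop_<section> symbols for sections whose names are valid C identifiers, which play the role of the __scheduling_functions_* symbols added to vmlinux.lds.S above.

/*
 * Sketch: tag functions into their own text section and test whether an
 * address falls inside it, the way get_wchan() now tests .text.sched.
 * Assumes gcc + GNU ld; all names here are hypothetical.
 */
#include <stdio.h>

#define __demo_sched __attribute__((__section__("demo_sched_text")))

/* provided automatically by GNU ld for C-identifier section names */
extern const char __start_demo_sched_text[];
extern const char __stop_demo_sched_text[];

static __demo_sched void fake_schedule(void)
{
	/* stand-in for schedule(): placed in the demo_sched_text section */
}

static void ordinary_function(void)
{
	/* stays in the normal .text section */
}

static int in_sched_text(unsigned long addr)
{
	/* mirrors the range check the patched get_wchan() performs */
	return addr >= (unsigned long)__start_demo_sched_text &&
	       addr <  (unsigned long)__stop_demo_sched_text;
}

int main(void)
{
	printf("fake_schedule:     %d\n",
	       in_sched_text((unsigned long)fake_schedule));
	printf("ordinary_function: %d\n",
	       in_sched_text((unsigned long)ordinary_function));
	return 0;
}

Built with plain gcc, fake_schedule() should report 1 and ordinary_function() 0, which is exactly the distinction get_wchan() draws against the .text.sched bounds to skip scheduler frames when reporting a task's wchan.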