--- /tmp/linux-2.2.10/include/asm-i386/semaphore-helper.h	Sat Feb 20 16:41:42 1999
+++ linux-2.2.10/include/asm-i386/semaphore-helper.h	Fri Jul 16 16:41:30 1999
@@ -13,14 +13,19 @@
  *
  * This is trivially done with load_locked/store_cond,
  * but on the x86 we need an external synchronizer.
+ *
+ * NOTE: we cannot look at the semaphore count here, since it can be
+ * unreliable: even if the count is less than 1, the semaphore could
+ * still be owned by another process. This is because not only up()
+ * increments the semaphore count; the interruptible/trylock paths
+ * also increment it when they fail.
  */
 static inline void wake_one_more(struct semaphore * sem)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&semaphore_wake_lock, flags);
-	if (atomic_read(&sem->count) <= 0)
-		sem->waking++;
+	sem->waking++;
 	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
 }
 
@@ -44,9 +49,11 @@
  *	0	go to sleep
  *	-EINTR	interrupted
  *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ * If we give up, we must undo the count decrease we previously did in
+ * down(). Subtle: up() can keep running and increasing the semaphore
+ * count even during our spinlock-protected critical section. So if our
+ * increment takes the count above zero, we must also consume the
+ * sem->waking increment that wake_one_more() has done (or will soon do).
  */
 static inline int waking_non_zero_interruptible(struct semaphore *sem,
 						struct task_struct *tsk)
@@ -59,7 +66,8 @@
 		sem->waking--;
 		ret = 1;
 	} else if (signal_pending(tsk)) {
-		atomic_inc(&sem->count);
+		if (atomic_inc_and_test_greater_zero(&sem->count))
+			sem->waking--;
 		ret = -EINTR;
 	}
 	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
@@ -71,9 +79,7 @@
  *	1	failed to lock
  *	0	got the lock
  *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
+ * The implementation details are the same as in the interruptible case.
  */
 static inline int waking_non_zero_trylock(struct semaphore *sem)
 {
@@ -82,8 +88,10 @@
 
 	spin_lock_irqsave(&semaphore_wake_lock, flags);
 	if (sem->waking <= 0)
-		atomic_inc(&sem->count);
-	else {
+	{
+		if (atomic_inc_and_test_greater_zero(&sem->count))
+			sem->waking--;
+	} else {
 		sem->waking--;
 		ret = 0;
 	}
--- /tmp/linux-2.2.10/include/asm-i386/atomic.h	Mon Jan 18 02:27:16 1999
+++ linux-2.2.10/include/asm-i386/atomic.h	Thu Jul 15 18:02:06 1999
@@ -73,6 +73,17 @@
 	return c != 0;
 }
 
+extern __inline__ int atomic_inc_and_test_greater_zero(volatile atomic_t *v)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		LOCK "incl %0; setg %1"
+		:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+		:"m" (__atomic_fool_gcc(v)));
+	return c;	/* can only be 0 or 1 */
+}
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK "andl %0,%1" \
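For reference, the new primitive atomically increments the counter and
reports whether the result ended up above zero: the setg instruction reads
the flags left behind by the LOCK-prefixed incl, so the increment and the
test are a single atomic step. Below is a minimal, non-atomic C sketch of
the intended semantics; the helper name is made up for illustration, and
unlike the real primitive it is not SMP-safe because it re-reads the value
after incrementing instead of testing the incl result itself.

	/*
	 * Single-threaded sketch of what atomic_inc_and_test_greater_zero()
	 * computes. Illustration only: the real version relies on the LOCK
	 * prefix and on setg consuming the flags set by incl.
	 */
	static int inc_and_test_greater_zero_sketch(int *v)
	{
		*v += 1;		/* LOCK "incl %0" */
		return *v > 0;		/* "setg %1": 1 iff result is signed > 0 */
	}

The signed greater-than test is the point: a sleeper undoing its count
decrease consumes a sem->waking increment only when the undo takes the
count above zero, i.e. only when an up() has actually run in the meantime.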