diff -urN 2.3.17/arch/i386/kernel/irq.c shm/arch/i386/kernel/irq.c
--- 2.3.17/arch/i386/kernel/irq.c	Wed Sep  8 00:25:40 1999
+++ shm/arch/i386/kernel/irq.c	Thu Sep  9 20:31:43 1999
@@ -151,10 +151,7 @@
 static inline void check_smp_invalidate(int cpu)
 {
 	if (test_bit(cpu, &smp_invalidate_needed)) {
-		struct mm_struct *mm = current->mm;
 		clear_bit(cpu, &smp_invalidate_needed);
-		if (mm)
-			atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
 		local_flush_tlb();
 	}
 }
diff -urN 2.3.17/arch/i386/kernel/smp.c shm/arch/i386/kernel/smp.c
--- 2.3.17/arch/i386/kernel/smp.c	Wed Sep  8 18:18:45 1999
+++ shm/arch/i386/kernel/smp.c	Fri Sep 10 02:19:33 1999
@@ -1635,10 +1635,7 @@
 		 * Take care of "crossing" invalidates
 		 */
 		if (test_bit(cpu, &smp_invalidate_needed)) {
-			struct mm_struct *mm = current->mm;
 			clear_bit(cpu, &smp_invalidate_needed);
-			if (mm)
-				atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
 			local_flush_tlb();
 		}
 		--stuck;
@@ -1660,39 +1657,33 @@
  */
 void flush_tlb_current_task(void)
 {
-	unsigned long vm_mask = 1 << current->processor;
-	struct mm_struct *mm = current->mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+	unsigned long cpu_mask;
 
-	mm->cpu_vm_mask = vm_mask;
+	rmb();
+	cpu_mask = current->mm->cpu_vm_mask & ~(1 << smp_processor_id());
 	flush_tlb_others(cpu_mask);
 	local_flush_tlb();
 }
 
 void flush_tlb_mm(struct mm_struct * mm)
 {
-	unsigned long vm_mask = 1 << current->processor;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+	unsigned long cpu_mask;
 
-	mm->cpu_vm_mask = 0;
-	if (current->active_mm == mm) {
-		mm->cpu_vm_mask = vm_mask;
+	rmb();
+	cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	if (current->active_mm == mm)
 		local_flush_tlb();
-	}
 	flush_tlb_others(cpu_mask);
 }
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
-	unsigned long vm_mask = 1 << current->processor;
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+	unsigned long cpu_mask;
 
-	mm->cpu_vm_mask = 0;
-	if (current->active_mm == mm) {
+	rmb();
+	cpu_mask = vma->vm_mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	if (current->active_mm == vma->vm_mm)
 		__flush_tlb_one(va);
-		mm->cpu_vm_mask = vm_mask;
-	}
 	flush_tlb_others(cpu_mask);
 }
 
@@ -1934,12 +1925,8 @@
 	struct task_struct *tsk = current;
 	unsigned int cpu = tsk->processor;
 
-	if (test_and_clear_bit(cpu, &smp_invalidate_needed)) {
-		struct mm_struct *mm = tsk->mm;
-		if (mm)
-			atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
+	if (test_and_clear_bit(cpu, &smp_invalidate_needed))
 		local_flush_tlb();
-	}
 	ack_APIC_irq();
 }
 
diff -urN 2.3.17/include/asm-i386/mmu_context.h shm/include/asm-i386/mmu_context.h
--- 2.3.17/include/asm-i386/mmu_context.h	Thu Sep  9 20:45:45 1999
+++ shm/include/asm-i386/mmu_context.h	Fri Sep 10 02:03:19 1999
@@ -12,7 +12,6 @@
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk, unsigned cpu)
 {
-
 	if (prev != next) {
 		/*
 		 * Re-load LDT if necessary
@@ -20,11 +19,13 @@
 		if (prev->segments != next->segments)
 			load_LDT(next);
 
+		clear_bit(cpu, &prev->cpu_vm_mask);
+		set_bit(cpu, &next->cpu_vm_mask);
+		wmb();
+
 		/* Re-load page tables */
 		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
-		clear_bit(cpu, &prev->cpu_vm_mask);
 	}
-	set_bit(cpu, &next->cpu_vm_mask);
 }
 
 #define activate_mm(prev, next) \
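
For reference, here is how the writer/reader pairing reads once the patch is applied. This is a sketch assembled from the "+" hunks above with explanatory comments added; it is not a verbatim copy of the resulting files.

/*
 * Writer side (switch_mm): update cpu_vm_mask for the outgoing and
 * incoming mm, then wmb() so the new mask is visible before this CPU
 * starts running on next's page tables.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, unsigned cpu)
{
	if (prev != next) {
		if (prev->segments != next->segments)
			load_LDT(next);

		clear_bit(cpu, &prev->cpu_vm_mask);	/* flushers of prev no longer IPI us */
		set_bit(cpu, &next->cpu_vm_mask);	/* flushers of next must IPI us from now on */
		wmb();					/* publish the mask updates before the %cr3 reload */

		/* Re-load page tables */
		asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
	}
}

/*
 * Reader side (the flush_tlb_* family): rmb() before sampling
 * cpu_vm_mask, pairing with the wmb() above, then flush locally and
 * cross-call only the CPUs whose bit is set.
 */
void flush_tlb_mm(struct mm_struct * mm)
{
	unsigned long cpu_mask;

	rmb();
	cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
	if (current->active_mm == mm)
		local_flush_tlb();
	flush_tlb_others(cpu_mask);
}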