===== mm/mmap.c 1.27 vs edited =====
--- 1.27/mm/mmap.c	Thu May 16 02:09:11 2002
+++ edited/mm/mmap.c	Thu May 16 02:09:36 2002
@@ -784,11 +784,13 @@
 		 */
 		area->vm_end = addr;
 		lock_vma_mappings(area);
+		spin_lock(&mm->page_table_lock);
 	} else if (addr == area->vm_start) {
 		area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
 		/* same locking considerations of the above case */
 		area->vm_start = end;
 		lock_vma_mappings(area);
+		spin_lock(&mm->page_table_lock);
 	} else {
 	/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
 		/* Add end mapping -- leave beginning for below */
@@ -815,10 +817,12 @@
 		 * things correctly.
 		 */
 		lock_vma_mappings(area);
+		spin_lock(&mm->page_table_lock);
 		__insert_vm_struct(mm, mpnt);
 	}
 
 	__insert_vm_struct(mm, area);
+	spin_unlock(&mm->page_table_lock);
 	unlock_vma_mappings(area);
 	return extra;
 }
@@ -882,6 +886,110 @@
 	}
 }
 
+/*
+ * Update the VMA and inode share lists.
+ *
+ * Ok - we have the memory areas we should free on the 'free' list,
+ * so release them, and do the vma updates.
+ * If the one of the segments is only being partially unmapped,
+ * it will put new vm_area_struct(s) into the address space.
+ * In that case we have to be careful with VM_DENYWRITE.
+ */
+static void fixup_unmap_vma_list(struct mm_struct *mm,
+	struct vm_area_struct *mpnt,
+	struct vm_area_struct *extra,
+	unsigned long start,
+	unsigned long end)
+{
+	struct vm_area_struct *next;
+
+	do {
+		unsigned long from, to;
+		struct file *file = NULL;
+
+		next = mpnt->vm_next;
+
+		from = start < mpnt->vm_start ? mpnt->vm_start : start;
+		to = end > mpnt->vm_end ? mpnt->vm_end : end;
+
+		if (mpnt->vm_flags & VM_DENYWRITE &&
+		    (from != mpnt->vm_start || to != mpnt->vm_end) &&
+		    (file = mpnt->vm_file) != NULL) {
+			atomic_dec(&file->f_dentry->d_inode->i_writecount);
+		}
+		remove_shared_vm_struct(mpnt);
+
+		/*
+		 * Fix the mapping, and free the old area if it wasn't reused.
+		 */
+		extra = unmap_fixup(mm, mpnt, from, to-from, extra);
+		if (file)
+			atomic_inc(&file->f_dentry->d_inode->i_writecount);
+	} while ((mpnt = next) != NULL);
+	validate_mm(mm);
+
+	/* Release the extra vma struct if it wasn't used */
+	if (extra)
+		kmem_cache_free(vm_area_cachep, extra);
+}
+
+/*
+ * Get rid of page table information in the indicated region.
+ *
+ * Called with the page table lock held.
+ */
+static void unmap_region(struct mm_struct *mm,
+	struct vm_area_struct *mpnt,
+	struct vm_area_struct *prev,
+	unsigned long start,
+	unsigned long end)
+{
+	mmu_gather_t *tlb;
+
+	tlb = tlb_gather_mmu(mm);
+
+	do {
+		unsigned long from, to;
+
+		from = start < mpnt->vm_start ? mpnt->vm_start : start;
+		to = end > mpnt->vm_end ? mpnt->vm_end : end;
+
+		unmap_page_range(tlb, mpnt, from, to);
+	} while ((mpnt = mpnt->vm_next) != NULL);
+
+	free_pgtables(tlb, prev, start, end);
+	tlb_finish_mmu(tlb, start, end);
+}
+
+/*
+ * Create a list of vma's touched by the unmap,
+ * removing them from the VM lists as we go..
+ *
+ * Called with the page_table_lock held.
+ */
+static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm,
+	struct vm_area_struct *mpnt,
+	struct vm_area_struct *prev,
+	unsigned long end)
+{
+	struct vm_area_struct **npp, *touched;
+
+	npp = (prev ? &prev->vm_next : &mm->mmap);
+
+	touched = NULL;
+	do {
+		struct vm_area_struct *next = mpnt->vm_next;
+		mpnt->vm_next = touched;
+		touched = mpnt;
+		mm->map_count--;
+		rb_erase(&mpnt->vm_rb, &mm->mm_rb);
+		mpnt = next;
+	} while (mpnt && mpnt->vm_start < end);
+	*npp = mpnt;
+	mm->mmap_cache = NULL;	/* Kill the cache. */
+	return touched;
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -889,8 +997,7 @@
  */
 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 {
-	mmu_gather_t *tlb;
-	struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
+	struct vm_area_struct *mpnt, *prev, *extra;
 
 	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
 		return -EINVAL;
@@ -898,16 +1005,13 @@
 	if ((len = PAGE_ALIGN(len)) == 0)
 		return -EINVAL;
 
-	/* Check if this memory area is ok - put it on the temporary
-	 * list if so..  The checks here are pretty simple --
-	 * every area affected in some way (by any overlap) is put
-	 * on the list.  If nothing is put on, nothing is affected.
-	 */
+	/* Find the first overlapping VMA */
 	mpnt = find_vma_prev(mm, addr, &prev);
 	if (!mpnt)
 		return 0;
 	/* we have  addr < mpnt->vm_end  */
 
+	/* if it doesn't overlap, we have nothing.. */
 	if (mpnt->vm_start >= addr+len)
 		return 0;
 
@@ -924,61 +1028,16 @@
 	if (!extra)
 		return -ENOMEM;
 
-	npp = (prev ? &prev->vm_next : &mm->mmap);
-	free = NULL;
 	spin_lock(&mm->page_table_lock);
-	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
-		*npp = mpnt->vm_next;
-		mpnt->vm_next = free;
-		free = mpnt;
-		rb_erase(&mpnt->vm_rb, &mm->mm_rb);
-	}
-	mm->mmap_cache = NULL;	/* Kill the cache. */
-
-	tlb = tlb_gather_mmu(mm);
-
-	/* Ok - we have the memory areas we should free on the 'free' list,
-	 * so release them, and unmap the page range..
-	 * If the one of the segments is only being partially unmapped,
-	 * it will put new vm_area_struct(s) into the address space.
-	 * In that case we have to be careful with VM_DENYWRITE.
-	 */
-	while ((mpnt = free) != NULL) {
-		unsigned long st, end;
-		struct file *file = NULL;
-
-		free = free->vm_next;
+	mpnt = touched_by_munmap(mm, mpnt, prev, addr+len);
 
-		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
-		end = addr+len;
-		end = end > mpnt->vm_end ? mpnt->vm_end : end;
+	/* Unmap the actual pages */
+	unmap_region(mm, mpnt, prev, addr, addr+len);
 
-		if (mpnt->vm_flags & VM_DENYWRITE &&
-		    (st != mpnt->vm_start || end != mpnt->vm_end) &&
-		    (file = mpnt->vm_file) != NULL) {
-			atomic_dec(&file->f_dentry->d_inode->i_writecount);
-		}
-		remove_shared_vm_struct(mpnt);
-		mm->map_count--;
-
-		unmap_page_range(tlb, mpnt, st, end);
-
-		/*
-		 * Fix the mapping, and free the old area if it wasn't reused.
-		 */
-		extra = unmap_fixup(mm, mpnt, st, end-st, extra);
-		if (file)
-			atomic_inc(&file->f_dentry->d_inode->i_writecount);
-	}
-	validate_mm(mm);
-
-	/* Release the extra vma struct if it wasn't used */
-	if (extra)
-		kmem_cache_free(vm_area_cachep, extra);
-
-	free_pgtables(tlb, prev, addr, addr+len);
-	tlb_finish_mmu(tlb, addr, addr+len);
 	spin_unlock(&mm->page_table_lock);
+
+	/* Fix up all other VM information */
+	fixup_unmap_vma_list(mm, mpnt, extra, addr, addr+len);
 	return 0;
 }
 
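
The heart of the restructuring is touched_by_munmap(): under mm->page_table_lock it detaches every VMA overlapping the unmap range from the mm's sorted vm_next list, collecting the detached areas on a private (reversed) list that unmap_region() and fixup_unmap_vma_list() then walk without any further list surgery. The userspace sketch below mimics just that splicing step with simplified stand-in types; vma_stub and detach_overlapping() are illustrative names only, not kernel APIs, and the real helper additionally drops each area from the rbtree and decrements map_count.

/*
 * Minimal userspace sketch (not kernel code) of the list manipulation done
 * by touched_by_munmap(): areas overlapping [first->vm_start, end) are
 * unlinked from the sorted list and returned, in reverse order, on a
 * private list, while the predecessor is spliced to the first untouched
 * area.  Names here are stand-ins for illustration.
 */
#include <stdio.h>

struct vma_stub {
	unsigned long vm_start, vm_end;	/* covers [vm_start, vm_end) */
	struct vma_stub *vm_next;	/* sorted singly-linked list */
};

static struct vma_stub *detach_overlapping(struct vma_stub **head,
					   struct vma_stub *first,
					   struct vma_stub *prev,
					   unsigned long end)
{
	struct vma_stub **npp = prev ? &prev->vm_next : head;
	struct vma_stub *touched = NULL;

	do {
		struct vma_stub *next = first->vm_next;
		first->vm_next = touched;	/* push onto private list */
		touched = first;
		first = next;
	} while (first && first->vm_start < end);

	*npp = first;	/* splice the list around the detached areas */
	return touched;
}

int main(void)
{
	/* Three areas: [0x1000,0x2000), [0x3000,0x5000), [0x8000,0x9000) */
	struct vma_stub c = { 0x8000, 0x9000, NULL };
	struct vma_stub b = { 0x3000, 0x5000, &c };
	struct vma_stub a = { 0x1000, 0x2000, &b };
	struct vma_stub *mmap_list = &a;

	/* Unmap [0x1800, 0x4000): touches 'a' and 'b' but not 'c' */
	struct vma_stub *touched = detach_overlapping(&mmap_list, &a, NULL, 0x4000);

	for (struct vma_stub *v = touched; v; v = v->vm_next)
		printf("touched:   [%#lx, %#lx)\n", v->vm_start, v->vm_end);
	for (struct vma_stub *v = mmap_list; v; v = v->vm_next)
		printf("remaining: [%#lx, %#lx)\n", v->vm_start, v->vm_end);
	return 0;
}

Built with any C99 compiler, the sketch prints the touched areas in reverse order; that ordering is harmless in the patch above because fixup_unmap_vma_list() and unmap_region() handle each touched area independently, clamping [start, end) to the area's own [vm_start, vm_end) before operating on it.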