diff -urN ref/arch/alpha/mm/fault.c stack-overflow/arch/alpha/mm/fault.c
--- ref/arch/alpha/mm/fault.c	Thu Feb 17 03:28:20 2000
+++ stack-overflow/arch/alpha/mm/fault.c	Thu Feb 17 03:17:20 2000
@@ -106,7 +106,7 @@
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if (expand_stack(vma, address))
+	if (expand_stack(vma, address, NULL))
 		goto bad_area;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
diff -urN ref/arch/i386/mm/fault.c stack-overflow/arch/i386/mm/fault.c
--- ref/arch/i386/mm/fault.c	Thu Feb 17 03:28:20 2000
+++ stack-overflow/arch/i386/mm/fault.c	Thu Feb 17 03:17:20 2000
@@ -30,14 +30,14 @@
  */
 int __verify_write(const void * addr, unsigned long size)
 {
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	unsigned long start = (unsigned long) addr;
 	int fault;
 
 	if (!size)
 		return 1;
 
-	vma = find_vma(current->mm, start);
+	vma = find_vma_prev(current->mm, start, &prev_vma);
 	if (!vma)
 		goto bad_area;
 	if (vma->vm_start > start)
@@ -75,7 +75,7 @@
 check_stack:
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if (expand_stack(vma, start) == 0)
+	if (expand_stack(vma, start, prev_vma) == 0)
 		goto good_area;
 
 bad_area:
@@ -137,7 +137,7 @@
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	unsigned long address;
 	unsigned long page;
 	unsigned long fixup;
@@ -159,7 +159,7 @@
 
 	down(&mm->mmap_sem);
 
-	vma = find_vma(mm, address);
+	vma = find_vma_prev(mm, address, &prev_vma);
 	if (!vma)
 		goto bad_area;
 	if (vma->vm_start <= address)
@@ -176,7 +176,7 @@
 		if (address + 32 < regs->esp)
 			goto bad_area;
 	}
-	if (expand_stack(vma, address))
+	if (expand_stack(vma, address, prev_vma))
 		goto bad_area;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
diff -urN ref/arch/ppc/mm/fault.c stack-overflow/arch/ppc/mm/fault.c
--- ref/arch/ppc/mm/fault.c	Thu Feb 17 03:28:20 2000
+++ stack-overflow/arch/ppc/mm/fault.c	Thu Feb 17 03:17:20 2000
@@ -60,7 +60,7 @@
 void
 do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code)
 {
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	struct mm_struct *mm = current->mm;
 #if defined(CONFIG_4xx)
 	int is_write = error_code & ESR_DST;
@@ -96,14 +96,14 @@
 		return;
 	}
 	down(&mm->mmap_sem);
-	vma = find_vma(mm, address);
+	vma = find_vma_prev(mm, address, &prev_vma);
 	if (!vma)
 		goto bad_area;
 	if (vma->vm_start <= address)
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if (expand_stack(vma, address))
+	if (expand_stack(vma, address, prev_vma))
 		goto bad_area;
 
 good_area:
diff -urN ref/include/linux/mm.h stack-overflow/include/linux/mm.h
--- ref/include/linux/mm.h	Thu Feb 17 03:28:20 2000
+++ stack-overflow/include/linux/mm.h	Thu Feb 17 03:19:53 2000
@@ -478,13 +478,18 @@
 
 #define GFP_HIGHMEM	__GFP_HIGHMEM
 
+extern int heap_stack_gap;
+
 /* vma is the first one with address < vma->vm_end,
  * and even address < vma->vm_start. Have to extend vma. */
-static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address,
+			       struct vm_area_struct * prev_vma)
 {
 	unsigned long grow;
 
 	address &= PAGE_MASK;
+	if (prev_vma && prev_vma->vm_end + (heap_stack_gap << PAGE_SHIFT) > address)
+		return -ENOMEM;
 	grow = (vma->vm_start - address) >> PAGE_SHIFT;
 	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
 	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur)
diff -urN ref/include/linux/sysctl.h stack-overflow/include/linux/sysctl.h
--- ref/include/linux/sysctl.h	Thu Feb 17 03:28:20 2000
+++ stack-overflow/include/linux/sysctl.h	Thu Feb 17 03:17:20 2000
@@ -126,7 +126,8 @@
 	VM_PAGECACHE=7,		/* struct: Set cache memory thresholds */
 	VM_PAGERDAEMON=8,	/* struct: Control kswapd behaviour */
 	VM_PGT_CACHE=9,		/* struct: Set page table cache parameters */
-	VM_PAGE_CLUSTER=10	/* int: set number of pages to swap together */
+	VM_PAGE_CLUSTER=10,	/* int: set number of pages to swap together */
+	VM_HEAP_STACK_GAP=11,	/* int: page gap between heap and stack */
 };
 
 
diff -urN ref/kernel/sysctl.c stack-overflow/kernel/sysctl.c
--- ref/kernel/sysctl.c	Thu Feb 17 03:28:20 2000
+++ stack-overflow/kernel/sysctl.c	Thu Feb 17 03:17:20 2000
@@ -265,6 +265,8 @@
 	 &pgt_cache_water, 2*sizeof(int), 0600, NULL, &proc_dointvec},
 	{VM_PAGE_CLUSTER, "page-cluster",
 	 &page_cluster, sizeof(int), 0600, NULL, &proc_dointvec},
+	{VM_HEAP_STACK_GAP, "heap-stack-gap",
+	 &heap_stack_gap, sizeof(int), 0644, NULL, &proc_dointvec},
 	{0}
 };
 
diff -urN ref/mm/mmap.c stack-overflow/mm/mmap.c
--- ref/mm/mmap.c	Thu Feb 17 03:28:20 2000
+++ stack-overflow/mm/mmap.c	Thu Feb 17 03:19:33 2000
@@ -40,6 +40,7 @@
 kmem_cache_t *vm_area_cachep;
 
 int sysctl_overcommit_memory;
+int heap_stack_gap = 128;
 
 /* Check that a process has enough memory to allocate a
  * new virtual mapping.
@@ -360,9 +361,14 @@
 
 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
+		unsigned long __heap_stack_gap = 0;
 		if (TASK_SIZE - len < addr)
 			return 0;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm)
+			return addr;
+		if (vmm->vm_flags & VM_GROWSDOWN)
+			__heap_stack_gap = heap_stack_gap << PAGE_SHIFT;
+		if (addr + len + __heap_stack_gap <= vmm->vm_start)
 			return addr;
 		addr = vmm->vm_end;
 	}
@@ -465,11 +471,11 @@
 
 struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
 {
-	struct vm_area_struct * vma;
+	struct vm_area_struct * vma, * prev_vma;
 	unsigned long start;
 
 	addr &= PAGE_MASK;
-	vma = find_vma(tsk->mm,addr);
+	vma = find_vma_prev(tsk->mm,addr, &prev_vma);
 	if (!vma)
 		return NULL;
 	if (vma->vm_start <= addr)
@@ -477,7 +483,7 @@
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		return NULL;
 	start = vma->vm_start;
-	if (expand_stack(vma, addr))
+	if (expand_stack(vma, addr, prev_vma))
 		return NULL;
 	if (vma->vm_flags & VM_LOCKED) {
 		make_pages_present(addr, start);