diff -urN 2.2.10-bigmem-D/arch/i386/mm/init.c 2.2.10-bigmem/arch/i386/mm/init.c
--- 2.2.10-bigmem-D/arch/i386/mm/init.c Sat Aug 7 15:02:59 1999
+++ 2.2.10-bigmem/arch/i386/mm/init.c Sun Aug 8 17:06:36 1999
@@ -228,15 +228,25 @@
 #ifdef CONFIG_BIGMEM
 unsigned long bigmem_start;
 unsigned long bigmem_end;
+unsigned long bigmem_mapnr;
 int nr_free_bigmem = 0;
+/* NOTE: fixmap_init allocates all the fixmap pagetables contiguously in
+   physical space, so we can cache the place of the first one and move
+   around without checking the pgd every time. */
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
-static void kmap_init(void)
+#define kmap_get_fixmap_pte(vaddr) \
+        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+static void __init kmap_init(void)
 {
-        unsigned long kmap_vstart = __fix_to_virt(FIX_KMAP_START);
+        unsigned long kmap_vstart;
+
+        /* cache the first kmap pte */
+        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 
-        kmap_pte = pte_offset(pmd_offset(pgd_offset_k(kmap_vstart), kmap_vstart), kmap_vstart);
         kmap_prot = PAGE_KERNEL;
         if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
                 pgprot_val(kmap_prot) |= _PAGE_GLOBAL;
@@ -378,7 +388,11 @@
 #endif
         local_flush_tlb();
 
+#ifndef CONFIG_BIGMEM
         return free_area_init(start_mem, end_mem);
+#else
+        return free_area_init(start_mem, bigmem_end + PAGE_OFFSET);
+#endif
 }
 
 /*
@@ -431,11 +445,17 @@
         unsigned long tmp;
 
         end_mem &= PAGE_MASK;
+#ifdef CONFIG_BIGMEM
+        bigmem_start = PAGE_ALIGN(bigmem_start);
+        bigmem_end &= PAGE_MASK;
+#endif
         high_memory = (void *) end_mem;
 #ifndef CONFIG_BIGMEM
         max_mapnr = num_physpages = MAP_NR(end_mem);
 #else
         max_mapnr = num_physpages = PHYSMAP_NR(bigmem_end);
+        /* cache the bigmem_mapnr */
+        bigmem_mapnr = PHYSMAP_NR(bigmem_start);
 #endif
 
         /* clear the zero-page */
diff -urN 2.2.10-bigmem-D/fs/proc/array.c 2.2.10-bigmem/fs/proc/array.c
--- 2.2.10-bigmem-D/fs/proc/array.c Thu Aug 5 19:02:40 1999
+++ 2.2.10-bigmem/fs/proc/array.c Sun Aug 8 17:00:42 1999
@@ -459,7 +459,9 @@
                 if (size < PAGE_SIZE)
                         buffer[size++] = c;
                 else {
+#ifdef CONFIG_BIGMEM
                         kunmap(addr, KM_READ);
+#endif
                         return result;
                 }
                 addr++;
diff -urN 2.2.10-bigmem-D/include/asm-i386/fixmap.h 2.2.10-bigmem/include/asm-i386/fixmap.h
--- 2.2.10-bigmem-D/include/asm-i386/fixmap.h Thu Aug 5 19:08:01 1999
+++ 2.2.10-bigmem/include/asm-i386/fixmap.h Sun Aug 8 17:09:03 1999
@@ -60,8 +60,8 @@
         FIX_LI_PCIB,    /* Lithium PCI Bridge B */
 #endif
 #ifdef CONFIG_BIGMEM
-        FIX_KMAP_START, /* reserved pte's for temporary kernel mappings */
-        FIX_KMAP_END = FIX_KMAP_START+(KM_TYPE_NR*NR_CPUS),
+        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
         __end_of_fixed_addresses
 };
diff -urN 2.2.10-bigmem-D/include/asm-i386/kmap.h 2.2.10-bigmem/include/asm-i386/kmap.h
--- 2.2.10-bigmem-D/include/asm-i386/kmap.h Thu Aug 5 19:08:02 1999
+++ 2.2.10-bigmem/include/asm-i386/kmap.h Sun Aug 8 17:09:04 1999
@@ -27,7 +27,7 @@
                 return kaddr;
         {
                 enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
-                unsigned long vaddr = __fix_to_virt(FIX_KMAP_START+idx);
+                unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
 
 #ifdef KMAP_DEBUG
                 if (!pte_none(*(kmap_pte-idx)))
@@ -49,7 +49,7 @@
 {
 #ifdef KMAP_DEBUG
         enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
-        if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_START+idx))
+        if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
         {
                 /* force other mappings to Oops if they'll try to access
                    this pte without first remap it */
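A note on the arithmetic behind the FIX_KMAP_START -> FIX_KMAP_BEGIN rename above: each CPU owns KM_TYPE_NR consecutive fixmap slots, and kmap_init() only has to cache the pte of the first slot because the fixmap pagetables sit in physically contiguous memory. The standalone sketch below models that indexing in plain C; the numeric values of FIXADDR_TOP, FIX_KMAP_BEGIN, KM_TYPE_NR and NR_CPUS are made up for illustration, and fix_to_virt() is a stand-in for __fix_to_virt(), not the kernel's definition.

/* Standalone sketch of the kmap fixmap indexing -- hypothetical constants,
   not the kernel's real values. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define KM_TYPE_NR      4               /* number of KM_* usage types per CPU */
#define NR_CPUS         2
#define FIXADDR_TOP     0xffffe000UL    /* made-up top of the fixmap area */
#define FIX_KMAP_BEGIN  8               /* made-up index of the first kmap slot */
#define FIX_KMAP_END    (FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1)

/* fixmap indexes grow downwards from FIXADDR_TOP, one page each,
   mirroring what __fix_to_virt() does. */
static unsigned long fix_to_virt(unsigned long idx)
{
        return FIXADDR_TOP - (idx << PAGE_SHIFT);
}

int main(void)
{
        int cpu, type;

        /* kmap_init() caches the pte of FIX_KMAP_BEGIN once; the pte for
           slot idx is then reached as kmap_pte - idx, which only works
           because the fixmap pagetables are contiguous (see the NOTE added
           to arch/i386/mm/init.c above). */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (type = 0; type < KM_TYPE_NR; type++) {
                        unsigned long idx = type + KM_TYPE_NR * cpu;
                        printf("cpu %d type %d -> fixmap %lu vaddr %#lx\n",
                               cpu, type, FIX_KMAP_BEGIN + idx,
                               fix_to_virt(FIX_KMAP_BEGIN + idx));
                }
        printf("last kmap slot = %d (FIX_KMAP_END)\n", FIX_KMAP_END);
        return 0;
}

Printing the last slot also shows why FIX_KMAP_END becomes FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1: with the old definition the enum reserved one fixmap pte more than the KM_TYPE_NR*NR_CPUS slots kmap() can ever use.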
diff -urN 2.2.10-bigmem-D/include/linux/mm.h 2.2.10-bigmem/include/linux/mm.h
--- 2.2.10-bigmem-D/include/linux/mm.h Sun Aug 8 02:21:12 1999
+++ 2.2.10-bigmem/include/linux/mm.h Sun Aug 8 17:09:04 1999
@@ -282,6 +282,7 @@
 #define free_page(addr) free_pages((addr),0)
 extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
 extern void FASTCALL(__free_page(struct page *));
+
 extern void show_free_areas(void);
 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
         unsigned long address);
diff -urN 2.2.10-bigmem-D/mm/filemap.c 2.2.10-bigmem/mm/filemap.c
--- 2.2.10-bigmem-D/mm/filemap.c Thu Aug 5 18:31:08 1999
+++ 2.2.10-bigmem/mm/filemap.c Sun Aug 8 14:42:47 1999
@@ -170,6 +170,11 @@
                 if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
                         continue;
 
+#ifdef CONFIG_BIGMEM
+                if (!(gfp_mask & __GFP_BIGMEM) && PageBIGMEM(page))
+                        continue;
+#endif
+
                 /* We can't free pages unless there's just one user */
                 if (atomic_read(&page->count) != 1)
                         continue;
diff -urN 2.2.10-bigmem-D/mm/page_alloc.c 2.2.10-bigmem/mm/page_alloc.c
--- 2.2.10-bigmem-D/mm/page_alloc.c Sat Aug 7 15:02:59 1999
+++ 2.2.10-bigmem/mm/page_alloc.c Sun Aug 8 17:06:01 1999
@@ -105,13 +105,16 @@
         unsigned long index = map_nr >> (1 + order);
         unsigned long mask = (~0UL) << order;
         unsigned long flags;
+#ifdef CONFIG_BIGMEM
+        extern unsigned long bigmem_mapnr;
+#endif
 
         spin_lock_irqsave(&page_alloc_lock, flags);
 
 #define list(x) (mem_map+(x))
 
 #ifdef CONFIG_BIGMEM
-        if (map_nr >= PHYSMAP_NR(bigmem_start)) {
+        if (map_nr >= bigmem_mapnr) {
                 area += BIGMEM_LISTS_OFFSET;
                 nr_free_bigmem -= mask;
         }
@@ -145,7 +148,6 @@
         }
 }
 
-
 void free_pages(unsigned long addr, unsigned long order)
 {
         unsigned long map_nr = MAP_NR(addr);
@@ -355,8 +357,6 @@
         unsigned long mask = PAGE_MASK;
         unsigned long i;
 
-        end_mem -= PAGE_OFFSET;
-
         /*
          * Select nr of pages we try to keep free for important stuff
          * with a minimum of 10 pages and a maximum of 256 pages, so
@@ -364,7 +364,7 @@
          * This is fairly arbitrary, but based on some behaviour
          * analysis.
          */
-        i = end_mem >> (PAGE_SHIFT+7);
+        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
         if (i < 10)
                 i = 10;
         if (i > 256)
@@ -373,10 +373,7 @@
         freepages.low = i * 2;
         freepages.high = i * 3;
         mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
-#ifdef CONFIG_BIGMEM
-        end_mem = bigmem_end;
-#endif
-        p = mem_map + (end_mem >> PAGE_SHIFT);
+        p = mem_map + MAP_NR(end_mem);
         start_mem = LONG_ALIGN((unsigned long) p);
         memset(mem_map, 0, start_mem - (unsigned long) mem_map);
         do {
@@ -393,7 +390,7 @@
 #endif
                 mask += mask;
                 end_mem = (end_mem + ~mask) & mask;
-                bitmap_size = end_mem >> (PAGE_SHIFT + i);
+                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                 bitmap_size = (bitmap_size + 7) >> 3;
                 bitmap_size = LONG_ALIGN(bitmap_size);
                 free_area[i].map = (unsigned int *) start_mem;
@@ -493,6 +490,43 @@
          * this process.
          */
         delete_from_swap_cache(page_map);
+#ifdef CONFIG_BIGMEM
+        /* We want to use the bigmem pages as much as possible. We can't
+           do I/O on bigmem pages, so we used a regular page to do the
+           real swapin, but now we can replace the regular page with a
+           bigmem page if there is any bigmem page available. */
+        if (!PageBIGMEM(page_map) && nr_free_bigmem)
+        {
+                unsigned long bigmem_page;
+
+                bigmem_page = __get_free_page(GFP_ATOMIC|GFP_BIGMEM);
+                if (bigmem_page)
+                {
+                        struct page * bigmem_map;
+
+                        bigmem_map = MAP_NR(bigmem_page) + mem_map;
+                        if (PageBIGMEM(bigmem_map))
+                        {
+                                unsigned long vaddr, cached_swap_entry;
+
+                                vaddr = kmap(bigmem_page, KM_WRITE);
+                                copy_page(vaddr, page);
+                                kunmap(vaddr, KM_WRITE);
+
+                                /* we can just forget the old page since
+                                   we stored its data into the new
+                                   bigmem_page. */
+                                bigmem_map->offset = page_map->offset;
+                                __free_page(page_map);
+
+                                /* put the bigmem page in place of the
+                                   previous page. */
+                                page = bigmem_page;
+                                page_map = bigmem_map;
+                        }
+                }
+        }
+#endif
         set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
         return;
 }
diff -urN 2.2.10-bigmem-D/mm/vmscan.c 2.2.10-bigmem/mm/vmscan.c
--- 2.2.10-bigmem-D/mm/vmscan.c Thu Aug 5 17:52:17 1999
+++ 2.2.10-bigmem/mm/vmscan.c Sun Aug 8 16:58:58 1999
@@ -60,7 +60,10 @@
         if (PageReserved(page_map)
             || PageLocked(page_map)
             || ((gfp_mask & __GFP_DMA) && !PageDMA(page_map))
-            || PageBIGMEM(page_map))
+#ifdef CONFIG_BIGMEM
+            || (!(gfp_mask & __GFP_BIGMEM) && PageBIGMEM(page_map))
+#endif
+            )
                 return 0;
 
         /*
@@ -149,6 +152,36 @@
         entry = get_swap_page();
         if (!entry)
                 return 0; /* No swap space left */
+
+#ifdef CONFIG_BIGMEM
+        /* A bigmem page can't be swapped out directly, otherwise the
+           b_data buffer addresses would break the lowlevel device
+           drivers. */
+        if (PageBIGMEM(page_map))
+        {
+                unsigned long regular_page;
+                unsigned long vaddr;
+
+                regular_page = __get_free_page(GFP_ATOMIC);
+                if (!regular_page)
+                        /* don't forget to undo the swap entry reference
+                           we hold here (but do that in the slow path). */
+                        goto out_free_swap;
+
+                vaddr = kmap(page, KM_READ);
+                copy_page(regular_page, vaddr);
+                kunmap(vaddr, KM_READ);
+
+                /* ok, we can just forget about our bigmem page since
+                   we stored its data into the new regular_page. */
+                __free_page(page_map);
+
+                /* put the regular page in place of the just released
+                   bigmem page. */
+                page = regular_page;
+                page_map = MAP_NR(regular_page) + mem_map;
+        }
+#endif
 
         vma->vm_mm->rss--;
         tsk->nswap++;
@@ -165,6 +198,10 @@
 
         __free_page(page_map);
         return 1;
+
+ out_free_swap:
+        swap_free(entry);
+        return 0;
 }
 
 /*
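The swap_in() and try_to_swap_out() hunks above use the same bounce-copy pattern in opposite directions: allocate a page of the other class, kmap the bigmem page just long enough for copy_page(), release the old page, and let the caller continue with the replacement page/page_map pair. The sketch below condenses that pattern using only calls that appear in the hunks (kmap, kunmap, copy_page, __get_free_page, __free_page, MAP_NR, mem_map); the helper name bounce_bigmem_to_regular() is hypothetical and the code is illustrative, not part of the patch.

/* Illustrative sketch only, not part of the patch.  "from" is the kernel
   address of a BIGMEM page; on success its data ends up in a freshly
   allocated regular page and the bigmem page is released. */
static unsigned long bounce_bigmem_to_regular(unsigned long from,
                                              struct page **page_map_p)
{
        unsigned long regular_page, vaddr;

        regular_page = __get_free_page(GFP_ATOMIC);
        if (!regular_page)
                return 0;       /* caller keeps the bigmem page */

        /* map the bigmem page into one of the per-CPU fixmap slots just
           long enough to copy it */
        vaddr = kmap(from, KM_READ);
        copy_page(regular_page, vaddr);
        kunmap(vaddr, KM_READ);

        /* drop the bigmem page and hand back the regular replacement */
        __free_page(*page_map_p);
        *page_map_p = MAP_NR(regular_page) + mem_map;
        return regular_page;
}

The swap_in() direction additionally carries page_map->offset over to the replacement page before freeing the old one, as shown in the mm/page_alloc.c hunk.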