diff -urN 2.3.13-pre8/Documentation/Configure.help 2.3.13-pre8-bigmem/Documentation/Configure.help --- 2.3.13-pre8/Documentation/Configure.help Sun Aug 8 17:21:26 1999 +++ 2.3.13-pre8-bigmem/Documentation/Configure.help Sun Aug 8 17:22:17 1999 @@ -168,6 +168,11 @@ on the Alpha. The only time you would ever not say Y is to say M in order to debug the code. Say Y unless you know what you are doing. +Big memory support +CONFIG_BIGMEM + This option is required if you want to utilize physical memory which + is not covered by the kernel virtual address space (> 1GB). + Normal PC floppy disk support CONFIG_BLK_DEV_FD If you want to use the floppy disk drive(s) of your PC under Linux, diff -urN 2.3.13-pre8/arch/i386/config.in 2.3.13-pre8-bigmem/arch/i386/config.in --- 2.3.13-pre8/arch/i386/config.in Sun Aug 8 17:21:27 1999 +++ 2.3.13-pre8-bigmem/arch/i386/config.in Sun Aug 8 17:22:17 1999 @@ -54,6 +54,7 @@ mainmenu_option next_comment comment 'General setup' +bool 'BIGMEM support' CONFIG_BIGMEM bool 'Networking support' CONFIG_NET bool 'PCI support' CONFIG_PCI if [ "$CONFIG_PCI" = "y" ]; then diff -urN 2.3.13-pre8/arch/i386/kernel/setup.c 2.3.13-pre8-bigmem/arch/i386/kernel/setup.c --- 2.3.13-pre8/arch/i386/kernel/setup.c Sun Aug 8 17:21:27 1999 +++ 2.3.13-pre8-bigmem/arch/i386/kernel/setup.c Sun Aug 8 17:22:17 1999 @@ -14,6 +14,8 @@ * Bart Hartgers , May 1999. * * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999. 
+ * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ /* @@ -268,6 +270,9 @@ __initfunc(void setup_arch(char **cmdline_p, unsigned long * memory_start_p, unsigned long * memory_end_p)) { +#ifdef CONFIG_BIGMEM + extern unsigned long bigmem_start, bigmem_end; +#endif unsigned long memory_start, memory_end; char c = ' ', *to = command_line, *from = COMMAND_LINE; int len = 0; @@ -351,12 +356,33 @@ #define VMALLOC_RESERVE (128 << 20) /* 128MB for vmalloc and initrd */ #define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)) +#ifdef CONFIG_BIGMEM + bigmem_start = bigmem_end = memory_end; +#endif if (memory_end > MAXMEM) { +#ifdef CONFIG_BIGMEM +#define MAXBIGMEM ((unsigned long)(~(VMALLOC_RESERVE-1))) + bigmem_start = MAXMEM; + bigmem_end = (memory_end < MAXBIGMEM) ? memory_end : MAXBIGMEM; +#endif memory_end = MAXMEM; +#ifdef CONFIG_BIGMEM + printk(KERN_NOTICE "%ldMB BIGMEM available.\n", + (bigmem_end-bigmem_start)>>20); +#else printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); +#endif } +#ifdef CONFIG_BIGMEM +#ifndef wich + else { + memory_end -= memory_end/4; + bigmem_start = memory_end; + } +#endif +#endif memory_end += PAGE_OFFSET; *memory_start_p = memory_start; diff -urN 2.3.13-pre8/arch/i386/mm/Makefile 2.3.13-pre8-bigmem/arch/i386/mm/Makefile --- 2.3.13-pre8/arch/i386/mm/Makefile Mon Jan 18 02:28:56 1999 +++ 2.3.13-pre8-bigmem/arch/i386/mm/Makefile Sun Aug 8 18:19:53 1999 @@ -10,4 +10,8 @@ O_TARGET := mm.o O_OBJS := init.o fault.o ioremap.o extable.o +ifeq ($(CONFIG_BIGMEM),y) +O_OBJS += bigmem.o +endif + include $(TOPDIR)/Rules.make diff -urN 2.3.13-pre8/arch/i386/mm/bigmem.c 2.3.13-pre8-bigmem/arch/i386/mm/bigmem.c --- 2.3.13-pre8/arch/i386/mm/bigmem.c Thu Jan 1 01:00:00 1970 +++ 2.3.13-pre8-bigmem/arch/i386/mm/bigmem.c Sun Aug 8 19:59:35 1999 @@ -0,0 +1,95 @@ +/* + * BIGMEM code and variables. 
+ * bigmem page conversions, Andrea Arcangeli */ + +#include +#include +#include + +unsigned long bigmem_start, bigmem_end, bigmem_mapnr; +int nr_free_bigpages = 0; + +/* NOTE: fixmap_init allocates all the fixmap pagetables contiguous on the + physical space so we can cache the place of the first one and move + around without checking the pgd every time. */ +pte_t *kmap_pte; +pgprot_t kmap_prot; + +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) + +void __init kmap_init(void) +{ + unsigned long kmap_vstart; + + /* cache the first kmap pte */ + kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); + kmap_pte = kmap_get_fixmap_pte(kmap_vstart); + + kmap_prot = PAGE_KERNEL; + if (boot_cpu_data.x86_capability & X86_FEATURE_PGE) + pgprot_val(kmap_prot) |= _PAGE_GLOBAL; +} + +struct page * prepare_bigmem_swapout(struct page * page) +{ + /* if this is a bigmem page so it can't be swapped out directly + otherwise the b_data buffer addresses will break + the lowlevel device drivers. */ + if (PageBIGMEM(page)) + { + unsigned long regular_page; + unsigned long vaddr; + + regular_page = __get_free_page(GFP_ATOMIC); + if (!regular_page) + return NULL; + + vaddr = kmap(page_address(page), KM_READ); + copy_page(regular_page, vaddr); + kunmap(vaddr, KM_READ); + + /* ok, we can just forget about our bigmem page since + we stored its data into the new regular_page. */ + __free_page(page); + + page = MAP_NR(regular_page) + mem_map; + } + return page; +} + +struct page * replace_with_bigmem(struct page * page) +{ + if (!PageBIGMEM(page) && nr_free_bigpages) + { + unsigned long kaddr; + + kaddr = __get_free_page(GFP_ATOMIC|GFP_BIGMEM); + if (kaddr) + { + struct page * bigmem_page; + + bigmem_page = MAP_NR(kaddr) + mem_map; + if (PageBIGMEM(bigmem_page)) + { + unsigned long vaddr; + + vaddr = kmap(kaddr, KM_WRITE); + copy_page(vaddr, page_address(page)); + kunmap(vaddr, KM_WRITE); + + /* Preserve the caching of the swap_entry. 
*/ + bigmem_page->offset = page->offset; + + /* We can just forget the old page since + we stored its data into the new + bigmem_page. */ + __free_page(page); + + page = bigmem_page; + } + } + } + return page; +} diff -urN 2.3.13-pre8/arch/i386/mm/init.c 2.3.13-pre8-bigmem/arch/i386/mm/init.c --- 2.3.13-pre8/arch/i386/mm/init.c Sun Aug 8 17:21:27 1999 +++ 2.3.13-pre8-bigmem/arch/i386/mm/init.c Sun Aug 8 19:00:46 1999 @@ -2,6 +2,8 @@ * linux/arch/i386/mm/init.c * * Copyright (C) 1995 Linus Torvalds + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include @@ -27,8 +29,14 @@ #include #include #include +#ifdef CONFIG_BIGMEM +#include +#endif static unsigned long totalram = 0; +#ifdef CONFIG_BIGMEM +static unsigned long totalbig = 0; +#endif extern void show_net_buffers(void); extern unsigned long init_smp_mappings(unsigned long); @@ -150,6 +158,9 @@ { int i,free = 0,total = 0,reserved = 0; int shared = 0, cached = 0; +#ifdef CONFIG_BIGMEM + int bigmem = 0; +#endif printk("Mem-info:\n"); show_free_areas(); @@ -157,6 +168,10 @@ i = max_mapnr; while (i-- > 0) { total++; +#ifdef CONFIG_BIGMEM + if (PageBIGMEM(mem_map+i)) + bigmem++; +#endif if (PageReserved(mem_map+i)) reserved++; else if (PageSwapCache(mem_map+i)) @@ -167,6 +182,9 @@ shared += page_count(mem_map+i) - 1; } printk("%d pages of RAM\n",total); +#ifdef CONFIG_BIGMEM + printk("%d pages of BIGMEM\n",bigmem); +#endif printk("%d reserved pages\n",reserved); printk("%d pages shared\n",shared); printk("%d pages swap cached\n",cached); @@ -315,7 +333,12 @@ #endif local_flush_tlb(); +#ifndef CONFIG_BIGMEM return free_area_init(start_mem, end_mem); +#else + kmap_init(); /* run after fixmap_init */ + return free_area_init(start_mem, bigmem_end + PAGE_OFFSET); +#endif } /* @@ -364,8 +387,18 @@ unsigned long endbase; end_mem &= PAGE_MASK; +#ifdef CONFIG_BIGMEM + bigmem_start = PAGE_ALIGN(bigmem_start); + bigmem_end &= PAGE_MASK; +#endif high_memory = (void *) end_mem; +#ifndef CONFIG_BIGMEM 
max_mapnr = num_physpages = MAP_NR(end_mem); +#else + max_mapnr = num_physpages = PHYSMAP_NR(bigmem_end); + /* cache the bigmem_mapnr */ + bigmem_mapnr = PHYSMAP_NR(bigmem_start); +#endif /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); @@ -428,13 +461,31 @@ #endif free_page(tmp); } - printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", +#ifdef CONFIG_BIGMEM + for (tmp = bigmem_start; tmp < bigmem_end; tmp += PAGE_SIZE) { + clear_bit(PG_reserved, &mem_map[PHYSMAP_NR(tmp)].flags); + set_bit(PG_BIGMEM, &mem_map[PHYSMAP_NR(tmp)].flags); + atomic_set(&mem_map[PHYSMAP_NR(tmp)].count, 1); + free_page(tmp + PAGE_OFFSET); + totalbig += PAGE_SIZE; + } + totalram += totalbig; +#endif + printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init" +#ifdef CONFIG_BIGMEM + ", %dk bigmem" +#endif + ")\n", (unsigned long) nr_free_pages << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codepages << (PAGE_SHIFT-10), reservedpages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10), - initpages << (PAGE_SHIFT-10)); + initpages << (PAGE_SHIFT-10) +#ifdef CONFIG_BIGMEM + ,(int) (totalbig >> 10) +#endif + ); if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); @@ -460,5 +511,9 @@ val->sharedram = 0; val->freeram = nr_free_pages << PAGE_SHIFT; val->bufferram = atomic_read(&buffermem); +#ifdef CONFIG_BIGMEM + val->totalbig = totalbig; + val->freebig = nr_free_bigpages << PAGE_SHIFT; +#endif return; } diff -urN 2.3.13-pre8/fs/proc/array.c 2.3.13-pre8-bigmem/fs/proc/array.c --- 2.3.13-pre8/fs/proc/array.c Sun Aug 8 17:21:37 1999 +++ 2.3.13-pre8-bigmem/fs/proc/array.c Sun Aug 8 19:43:52 1999 @@ -44,6 +44,8 @@ * * Al Viro : safe handling of mm_struct * + * Gerhard Wichert : added BIGMEM support + * Siemens AG */ #include @@ -362,6 +364,10 @@ "MemShared: %8lu kB\n" "Buffers: %8lu kB\n" "Cached: %8u kB\n" +#ifdef CONFIG_BIGMEM + "BigTotal: %8lu kB\n" + "BigFree: %8lu kB\n" +#endif "SwapTotal: %8lu kB\n" "SwapFree: %8lu 
kB\n", i.totalram >> 10, @@ -369,6 +375,10 @@ i.sharedram >> 10, i.bufferram >> 10, atomic_read(&page_cache_size) << (PAGE_SHIFT - 10), +#ifdef CONFIG_BIGMEM + i.totalbig >> 10, + i.freebig >> 10, +#endif i.totalswap >> 10, i.freeswap >> 10); } @@ -418,6 +428,10 @@ return pte_page(pte) + (ptr & ~PAGE_MASK); } +#ifdef CONFIG_BIGMEM +#include +#endif + static int get_array(struct mm_struct *mm, unsigned long start, unsigned long end, char * buffer) { unsigned long addr; @@ -430,6 +444,9 @@ addr = get_phys_addr(mm, start); if (!addr) return result; +#ifdef CONFIG_BIGMEM + addr = kmap(addr, KM_READ); +#endif do { c = *(char *) addr; if (!c) @@ -437,12 +454,25 @@ if (size < PAGE_SIZE) buffer[size++] = c; else + { +#ifdef CONFIG_BIGMEM + kunmap(addr, KM_READ); +#endif return result; + } addr++; start++; if (!c && start >= end) + { +#ifdef CONFIG_BIGMEM + kunmap(addr, KM_READ); +#endif return result; + } } while (addr & ~PAGE_MASK); +#ifdef CONFIG_BIGMEM + kunmap(addr, KM_READ); +#endif } return result; } diff -urN 2.3.13-pre8/include/asm-i386/bigmem.h 2.3.13-pre8-bigmem/include/asm-i386/bigmem.h --- 2.3.13-pre8/include/asm-i386/bigmem.h Thu Jan 1 01:00:00 1970 +++ 2.3.13-pre8-bigmem/include/asm-i386/bigmem.h Mon Aug 9 11:32:20 1999 @@ -0,0 +1,70 @@ +/* + * bigmem.h: virtual kernel memory mappings for big memory + * + * Used in CONFIG_BIGMEM systems for memory pages which are not + * addressable by direct kernel virtual addresses. 
+ * + * Copyright (C) 1999 Gerhard Wichert, Siemens AG + * Gerhard.Wichert@pdb.siemens.de + */ + +#ifndef _ASM_BIGMEM_H +#define _ASM_BIGMEM_H + +#include + +/* declarations for bigmem.c */ +extern unsigned long bigmem_start, bigmem_end, bigmem_mapnr; +extern int nr_free_bigpages; + +extern pte_t *kmap_pte; +extern pgprot_t kmap_prot; + +extern void kmap_init(void) __init; +extern struct page * prepare_bigmem_swapout(struct page *); +extern struct page * replace_with_bigmem(struct page *); + +/* kmap helper functions necessary to access the bigmem pages in kernel */ +#include +#include +#define KMAP_DEBUG /* undef for production */ + +extern inline unsigned long kmap(unsigned long kaddr, enum km_type type) +{ + if (__pa(kaddr) < bigmem_start) + return kaddr; + { + enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id(); + unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx); + +#ifdef KMAP_DEBUG + if (!pte_none(*(kmap_pte-idx))) + { + __label__ here; + here: + printk(KERN_ERR "not null pte on CPU %d from %p\n", + smp_processor_id(), &&here); + } +#endif + set_pte(kmap_pte-idx, mk_pte(kaddr & PAGE_MASK, kmap_prot)); + __flush_tlb_one(vaddr); + + return vaddr | (kaddr & ~PAGE_MASK); + } +} + +extern inline void kunmap(unsigned long vaddr, enum km_type type) +{ +#ifdef KMAP_DEBUG + enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id(); + if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx)) + { + /* force other mappings to Oops if they'll try to access + this pte without first remap it */ + pte_clear(kmap_pte-idx); + __flush_tlb_one(vaddr); + } +#endif +} + +#endif /* _ASM_BIGMEM_H */ diff -urN 2.3.13-pre8/include/asm-i386/fixmap.h 2.3.13-pre8-bigmem/include/asm-i386/fixmap.h --- 2.3.13-pre8/include/asm-i386/fixmap.h Mon Aug 9 02:06:51 1999 +++ 2.3.13-pre8-bigmem/include/asm-i386/fixmap.h Mon Aug 9 11:32:19 1999 @@ -6,6 +6,8 @@ * for more details. 
* * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #ifndef _ASM_FIXMAP_H @@ -15,6 +17,10 @@ #include #include #include +#ifdef CONFIG_BIGMEM +#include +#include +#endif /* * Here we define all the compile-time 'special' virtual @@ -54,6 +60,10 @@ FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ +#endif +#ifdef CONFIG_BIGMEM + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif __end_of_fixed_addresses }; diff -urN 2.3.13-pre8/include/asm-i386/io.h 2.3.13-pre8-bigmem/include/asm-i386/io.h --- 2.3.13-pre8/include/asm-i386/io.h Mon Aug 9 02:06:53 1999 +++ 2.3.13-pre8-bigmem/include/asm-i386/io.h Mon Aug 9 11:32:20 1999 @@ -27,6 +27,7 @@ /* * Bit simplified and optimized by Jan Hubicka + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. */ #ifdef SLOW_IO_BY_JUMPING @@ -101,8 +102,14 @@ #include #include +#ifdef CONFIG_BIGMEM +#define __io_virt(x) __va(x) +#define __io_phys(x) __pa(x) +#else #define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x))) #define __io_phys(x) ((unsigned long)(x) & ~PAGE_OFFSET) +#endif + /* * Change virtual addresses to physical addresses and vv. 
* These are pretty trivial diff -urN 2.3.13-pre8/include/asm-i386/kmap_types.h 2.3.13-pre8-bigmem/include/asm-i386/kmap_types.h --- 2.3.13-pre8/include/asm-i386/kmap_types.h Thu Jan 1 01:00:00 1970 +++ 2.3.13-pre8-bigmem/include/asm-i386/kmap_types.h Sun Aug 8 17:22:17 1999 @@ -0,0 +1,10 @@ +#ifndef _ASM_KMAP_TYPES_H +#define _ASM_KMAP_TYPES_H + +enum km_type { + KM_READ, + KM_WRITE, + KM_TYPE_NR, +}; + +#endif diff -urN 2.3.13-pre8/include/asm-i386/page.h 2.3.13-pre8-bigmem/include/asm-i386/page.h --- 2.3.13-pre8/include/asm-i386/page.h Mon Aug 9 02:06:51 1999 +++ 2.3.13-pre8-bigmem/include/asm-i386/page.h Mon Aug 9 11:32:18 1999 @@ -101,6 +101,9 @@ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT) +#ifdef CONFIG_BIGMEM +#define PHYSMAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT) +#endif #endif /* __KERNEL__ */ diff -urN 2.3.13-pre8/include/linux/kernel.h 2.3.13-pre8-bigmem/include/linux/kernel.h --- 2.3.13-pre8/include/linux/kernel.h Sun Aug 8 17:21:39 1999 +++ 2.3.13-pre8-bigmem/include/linux/kernel.h Mon Aug 9 11:32:18 1999 @@ -90,10 +90,13 @@ unsigned long freeram; /* Available memory size */ unsigned long sharedram; /* Amount of shared memory */ unsigned long bufferram; /* Memory used by buffers */ +#ifdef CONFIG_BIGMEM + unsigned long totalbig; /* Total big memory size */ + unsigned long freebig; /* Available big memory size */ +#endif unsigned long totalswap; /* Total swap space size */ unsigned long freeswap; /* swap space still available */ unsigned short procs; /* Number of current processes */ - char _f[22]; /* Pads structure to 64 bytes */ }; #endif diff -urN 2.3.13-pre8/include/linux/mm.h 2.3.13-pre8-bigmem/include/linux/mm.h --- 2.3.13-pre8/include/linux/mm.h Mon Aug 9 02:06:52 1999 +++ 2.3.13-pre8-bigmem/include/linux/mm.h Mon Aug 9 11:32:19 1999 @@ -152,6 +152,9 @@ #define PG_Slab 8 #define PG_swap_cache 9 #define PG_skip 10 +#ifdef 
CONFIG_BIGMEM +#define PG_BIGMEM 11 +#endif /* bits 21-30 unused */ #define PG_reserved 31 @@ -185,6 +188,9 @@ #define PageDMA(page) (test_bit(PG_DMA, &(page)->flags)) #define PageSlab(page) (test_bit(PG_Slab, &(page)->flags)) #define PageSwapCache(page) (test_bit(PG_swap_cache, &(page)->flags)) +#ifdef CONFIG_BIGMEM +#define PageBIGMEM(page) (test_bit(PG_BIGMEM, &(page)->flags)) +#endif #define PageReserved(page) (test_bit(PG_reserved, &(page)->flags)) #define PageSetSlab(page) (set_bit(PG_Slab, &(page)->flags)) @@ -358,11 +364,17 @@ #define __GFP_HIGH 0x08 #define __GFP_IO 0x10 #define __GFP_SWAP 0x20 +#ifdef CONFIG_BIGMEM +#define __GFP_BIGMEM 0x40 +#endif #define __GFP_DMA 0x80 #define GFP_BUFFER (__GFP_LOW | __GFP_WAIT) #define GFP_ATOMIC (__GFP_HIGH) +#ifdef CONFIG_BIGMEM +#define GFP_BIGUSER (__GFP_LOW | __GFP_WAIT | __GFP_IO | __GFP_BIGMEM) +#endif #define GFP_USER (__GFP_LOW | __GFP_WAIT | __GFP_IO) #define GFP_KERNEL (__GFP_MED | __GFP_WAIT | __GFP_IO) #define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO) @@ -372,6 +384,13 @@ platforms, used as appropriate on others */ #define GFP_DMA __GFP_DMA + +#ifdef CONFIG_BIGMEM +/* Flag - indicates that the buffer can be taken from big memory which is not + directly addressable by the kernel */ + +#define GFP_BIGMEM __GFP_BIGMEM +#endif /* vma is the first one with address < vma->vm_end, * and even address < vma->vm_start. Have to extend vma. 
*/ diff -urN 2.3.13-pre8/mm/filemap.c 2.3.13-pre8-bigmem/mm/filemap.c --- 2.3.13-pre8/mm/filemap.c Sun Aug 8 17:21:41 1999 +++ 2.3.13-pre8-bigmem/mm/filemap.c Sun Aug 8 17:33:16 1999 @@ -248,6 +248,11 @@ if ((gfp_mask & __GFP_DMA) && !PageDMA(page)) continue; +#ifdef CONFIG_BIGMEM + if (!(gfp_mask & __GFP_BIGMEM) && PageBIGMEM(page)) + continue; +#endif + count--; /* diff -urN 2.3.13-pre8/mm/memory.c 2.3.13-pre8-bigmem/mm/memory.c --- 2.3.13-pre8/mm/memory.c Sun Aug 8 17:21:41 1999 +++ 2.3.13-pre8-bigmem/mm/memory.c Sun Aug 8 19:12:51 1999 @@ -31,6 +31,9 @@ /* * 05.04.94 - Multi-page memory management added for v1.1. * Idea by Alex Bligh (alex@cconcepts.co.uk) + * + * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG + * (Gerhard.Wichert@pdb.siemens.de) */ #include @@ -767,6 +770,9 @@ * We enter with the page table read-lock held, and need to exit without * it. */ +#ifdef CONFIG_BIGMEM +#include +#endif static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t pte) { @@ -807,7 +813,11 @@ * Ok, we need to copy. Oh, well.. 
*/ spin_unlock(&tsk->mm->page_table_lock); +#ifndef CONFIG_BIGMEM new_page = __get_free_page(GFP_USER); +#else + new_page = __get_free_page(GFP_BIGUSER); +#endif if (!new_page) return -1; spin_lock(&tsk->mm->page_table_lock); @@ -818,7 +828,18 @@ if (pte_val(*page_table) == pte_val(pte)) { if (PageReserved(page)) ++vma->vm_mm->rss; +#ifndef CONFIG_BIGMEM copy_cow_page(old_page,new_page); +#else + { + unsigned long old_vpage, new_vpage; + old_vpage = kmap(old_page, KM_READ); + new_vpage = kmap(new_page, KM_WRITE); + copy_cow_page(old_vpage, new_vpage); + kunmap(old_vpage, KM_READ); + kunmap(new_vpage, KM_WRITE); + } +#endif flush_page_to_ram(old_page); flush_page_to_ram(new_page); flush_cache_page(vma, address); @@ -994,6 +1015,10 @@ if (write_access && !is_page_shared(page)) { delete_from_swap_cache(page); +#ifdef CONFIG_BIGMEM + page = replace_with_bigmem(page); + pte = mk_pte(page_address(page), vma->vm_page_prot); +#endif pte = pte_mkwrite(pte_mkdirty(pte)); } set_pte(page_table, pte); @@ -1009,10 +1034,20 @@ { pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot)); if (write_access) { +#ifndef CONFIG_BIGMEM unsigned long page = __get_free_page(GFP_USER); if (!page) return -1; clear_page(page); +#else + unsigned long vaddr; + unsigned long page = __get_free_page(GFP_BIGUSER); + if (!page) + return -1; + vaddr = kmap(page, KM_WRITE); + clear_page(vaddr); + kunmap(vaddr, KM_WRITE); +#endif entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); vma->vm_mm->rss++; tsk->min_flt++; diff -urN 2.3.13-pre8/mm/page_alloc.c 2.3.13-pre8-bigmem/mm/page_alloc.c --- 2.3.13-pre8/mm/page_alloc.c Tue Jul 13 02:02:40 1999 +++ 2.3.13-pre8-bigmem/mm/page_alloc.c Mon Aug 9 11:54:12 1999 @@ -3,6 +3,7 @@ * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include @@ -17,6 +18,9 @@ #include #include /* for copy_to/from_user */ #include 
+#ifdef CONFIG_BIGMEM +#include /* export bigmem vars */ +#endif int nr_swap_pages = 0; int nr_free_pages = 0; @@ -45,7 +49,12 @@ #define memory_head(x) ((struct page *)(x)) +#ifdef CONFIG_BIGMEM +#define BIGMEM_LISTS_OFFSET NR_MEM_LISTS +static struct free_area_struct free_area[NR_MEM_LISTS*2]; +#else static struct free_area_struct free_area[NR_MEM_LISTS]; +#endif static inline void init_mem_queue(struct free_area_struct * head) { @@ -96,11 +105,20 @@ unsigned long index = map_nr >> (1 + order); unsigned long mask = (~0UL) << order; unsigned long flags; +#ifdef CONFIG_BIGMEM + extern unsigned long bigmem_mapnr; +#endif spin_lock_irqsave(&page_alloc_lock, flags); #define list(x) (mem_map+(x)) +#ifdef CONFIG_BIGMEM + if (map_nr >= bigmem_mapnr) { + area += BIGMEM_LISTS_OFFSET; + nr_free_bigpages -= mask; + } +#endif map_nr &= mask; nr_free_pages -= mask; while (mask + (1 << (NR_MEM_LISTS-1))) { @@ -160,6 +178,29 @@ change_bit((index) >> (1+(order)), (area)->map) #define CAN_DMA(x) (PageDMA(x)) #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT)) + +#ifdef CONFIG_BIGMEM +#define RMQUEUEBIG(order, gfp_mask) \ +if (gfp_mask & __GFP_BIGMEM) { \ + struct free_area_struct * area = free_area+order+BIGMEM_LISTS_OFFSET; \ + unsigned long new_order = order; \ + do { struct page *prev = memory_head(area), *ret = prev->next; \ + if (memory_head(area) != ret) { \ + unsigned long map_nr; \ + (prev->next = ret->next)->prev = prev; \ + map_nr = ret - mem_map; \ + MARK_USED(map_nr, new_order, area); \ + nr_free_pages -= 1 << order; \ + nr_free_bigpages -= 1 << order; \ + EXPAND(ret, map_nr, order, new_order, area); \ + spin_unlock_irqrestore(&page_alloc_lock, flags); \ + return ADDRESS(map_nr); \ + } \ + new_order++; area++; \ + } while (new_order < NR_MEM_LISTS); \ +} +#endif + #define RMQUEUE(order, gfp_mask) \ do { struct free_area_struct * area = free_area+order; \ unsigned long new_order = order; \ @@ -195,6 +236,9 @@ } while (0) int low_on_memory = 0; +#ifdef CONFIG_BIGMEM 
+int low_on_bigmemory = 0; +#endif unsigned long __get_free_pages(int gfp_mask, unsigned long order) { @@ -222,6 +266,7 @@ if (!(current->flags & PF_MEMALLOC)) { int freed; +#ifndef CONFIG_BIGMEM if (nr_free_pages > freepages.min) { if (!low_on_memory) goto ok_to_allocate; @@ -232,6 +277,30 @@ } low_on_memory = 1; +#else + if (gfp_mask & __GFP_BIGMEM) + { + if (nr_free_pages > freepages.min) { + if (!low_on_bigmemory) + goto ok_to_allocate; + if (nr_free_pages >= freepages.high) { + low_on_bigmemory = 0; + goto ok_to_allocate; + } + } + low_on_bigmemory = 1; + } else { + if (nr_free_pages-nr_free_bigpages > freepages.min) { + if (!low_on_memory) + goto ok_to_allocate; + if (nr_free_pages-nr_free_bigpages >= freepages.high) { + low_on_memory = 0; + goto ok_to_allocate; + } + } + low_on_memory = 1; + } +#endif current->flags |= PF_MEMALLOC; freed = try_to_free_pages(gfp_mask); current->flags &= ~PF_MEMALLOC; @@ -241,6 +310,9 @@ } ok_to_allocate: spin_lock_irqsave(&page_alloc_lock, flags); +#ifdef CONFIG_BIGMEM + RMQUEUEBIG(order, gfp_mask); +#endif RMQUEUE(order, gfp_mask); spin_unlock_irqrestore(&page_alloc_lock, flags); @@ -268,7 +340,13 @@ unsigned long order, flags; unsigned long total = 0; +#ifdef CONFIG_BIGMEM + printk("Free pages: %6dkB (%6dkB BigMem)\n ( ", + nr_free_pages<<(PAGE_SHIFT-10), + nr_free_bigpages<<(PAGE_SHIFT-10)); +#else printk("Free pages: %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10)); +#endif printk("Free: %d (%d %d %d)\n", nr_free_pages, freepages.min, @@ -281,6 +359,13 @@ for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) { nr ++; } +#ifdef CONFIG_BIGMEM + for (tmp = free_area[BIGMEM_LISTS_OFFSET+order].next; + tmp != memory_head(free_area+BIGMEM_LISTS_OFFSET+order); + tmp = tmp->next) { + nr ++; + } +#endif total += nr * ((PAGE_SIZE>>10) << order); printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order)); } @@ -334,6 +419,9 @@ for (i = 0 ; i < NR_MEM_LISTS ; i++) { unsigned long bitmap_size; 
init_mem_queue(free_area+i); +#ifdef CONFIG_BIGMEM + init_mem_queue(free_area+BIGMEM_LISTS_OFFSET+i); +#endif mask += mask; end_mem = (end_mem + ~mask) & mask; bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i); @@ -342,6 +430,11 @@ free_area[i].map = (unsigned int *) start_mem; memset((void *) start_mem, 0, bitmap_size); start_mem += bitmap_size; +#ifdef CONFIG_BIGMEM + free_area[BIGMEM_LISTS_OFFSET+i].map = (unsigned int *) start_mem; + memset((void *) start_mem, 0, bitmap_size); + start_mem += bitmap_size; +#endif } return start_mem; } diff -urN 2.3.13-pre8/mm/vmalloc.c 2.3.13-pre8-bigmem/mm/vmalloc.c --- 2.3.13-pre8/mm/vmalloc.c Tue Jul 13 00:33:04 1999 +++ 2.3.13-pre8-bigmem/mm/vmalloc.c Sun Aug 8 17:22:17 1999 @@ -2,6 +2,7 @@ * linux/mm/vmalloc.c * * Copyright (C) 1993 Linus Torvalds + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include @@ -94,7 +95,11 @@ unsigned long page; if (!pte_none(*pte)) printk("alloc_area_pte: page already exists\n"); +#ifdef CONFIG_BIGMEM + page = __get_free_page(GFP_KERNEL|GFP_BIGMEM); +#else page = __get_free_page(GFP_KERNEL); +#endif if (!page) return -ENOMEM; set_pte(pte, mk_pte(page, PAGE_KERNEL)); diff -urN 2.3.13-pre8/mm/vmscan.c 2.3.13-pre8-bigmem/mm/vmscan.c --- 2.3.13-pre8/mm/vmscan.c Sun Aug 8 17:21:41 1999 +++ 2.3.13-pre8-bigmem/mm/vmscan.c Mon Aug 9 11:46:22 1999 @@ -19,6 +19,9 @@ #include #include +#ifdef CONFIG_BIGMEM +#include +#endif /* * The swap-out functions return 1 if they successfully @@ -66,7 +69,11 @@ if (PageReserved(page) || PageLocked(page) - || ((gfp_mask & __GFP_DMA) && !PageDMA(page))) + || ((gfp_mask & __GFP_DMA) && !PageDMA(page)) +#ifdef CONFIG_BIGMEM + || (!(gfp_mask & __GFP_BIGMEM) && PageBIGMEM(page)) +#endif + ) goto out_failed_unlock; /* @@ -157,6 +164,11 @@ if (!entry) goto out_failed_unlock; /* No swap space left */ +#ifdef CONFIG_BIGMEM + if (!(page = prepare_bigmem_swapout(page))) + goto out_swap_free_unlock; +#endif + vma->vm_mm->rss--; 
set_pte(page_table, __pte(entry)); spin_unlock(&vma->vm_mm->page_table_lock); @@ -177,6 +189,11 @@ spin_unlock(&vma->vm_mm->page_table_lock); out_failed: return 0; +out_swap_free_unlock: + swap_free(entry); + spin_unlock(&vma->vm_mm->page_table_lock); + return 0; + } /* @@ -487,7 +504,13 @@ * up on a more timely basis. */ do { +#ifndef CONFIG_BIGMEM if (nr_free_pages >= freepages.high) +#else + /* kswapd is critical to provide GFP_ATOMIC + allocations (not GFP_BIGMEM ones). */ + if (nr_free_pages - nr_free_bigpages >= freepages.high) +#endif break; if (!do_try_to_free_pages(GFP_KSWAPD))