diff -urN 2.2.10/arch/i386/config.in 2.2.10-bigmem.orig/arch/i386/config.in
--- 2.2.10/arch/i386/config.in	Tue Jul 13 00:33:03 1999
+++ 2.2.10-bigmem.orig/arch/i386/config.in	Thu Jul 22 21:52:55 1999
@@ -50,6 +50,7 @@
 mainmenu_option next_comment
 comment 'General setup'
 
+bool 'BIGMEM support' CONFIG_BIGMEM
 bool 'Networking support' CONFIG_NET
 bool 'PCI support' CONFIG_PCI
 if [ "$CONFIG_PCI" = "y" ]; then
diff -urN 2.2.10/arch/i386/kernel/setup.c 2.2.10-bigmem.orig/arch/i386/kernel/setup.c
--- 2.2.10/arch/i386/kernel/setup.c	Tue Jul 13 00:33:20 1999
+++ 2.2.10-bigmem.orig/arch/i386/kernel/setup.c	Thu Jul 22 21:54:00 1999
@@ -14,6 +14,8 @@
  *	Bart Hartgers, May 199.
  *
  *	Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
+ *
+ *	Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 /*
@@ -252,6 +254,10 @@
 __initfunc(void setup_arch(char **cmdline_p,
 	unsigned long * memory_start_p, unsigned long * memory_end_p))
 {
+#ifdef CONFIG_BIGMEM
+	extern unsigned long bigmem_start;
+	extern unsigned long bigmem_end;
+#endif
 	unsigned long memory_start, memory_end;
 	char c = ' ', *to = command_line, *from = COMMAND_LINE;
 	int len = 0;
@@ -336,13 +342,34 @@
 #define VMALLOC_RESERVE	(64 << 20)	/* 64MB for vmalloc */
 #define MAXMEM	((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
 
+#ifdef CONFIG_BIGMEM
+	bigmem_start = bigmem_end = memory_end;
+#endif
+
 	if (memory_end > MAXMEM) {
+#ifdef CONFIG_BIGMEM
+#define MAXBIGMEM ((unsigned long)(~(VMALLOC_RESERVE-1)))
+		bigmem_start = MAXMEM;
+		bigmem_end = (memory_end < MAXBIGMEM) ? memory_end : MAXBIGMEM;
+#endif
 		memory_end = MAXMEM;
+#ifdef CONFIG_BIGMEM
+		printk(KERN_NOTICE "%ldMB BIGMEM available.\n",
+			(bigmem_end-bigmem_start)>>20);
+#else
 		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
 			MAXMEM>>20);
+#endif
 	}
-
+#ifdef CONFIG_BIGMEM
+#ifndef wich
+	else {
+		memory_end -= memory_end/4;
+		bigmem_start = memory_end;
+	}
+#endif
+#endif
 	memory_end += PAGE_OFFSET;
 	*memory_start_p = memory_start;
 	*memory_end_p = memory_end;
diff -urN 2.2.10/arch/i386/kernel/smp.c 2.2.10-bigmem.orig/arch/i386/kernel/smp.c
--- 2.2.10/arch/i386/kernel/smp.c	Tue Jul 13 00:33:20 1999
+++ 2.2.10-bigmem.orig/arch/i386/kernel/smp.c	Thu Jul 22 21:54:20 1999
@@ -30,6 +30,7 @@
  *	Alan Cox	:	Added EBDA scanning
  *	Ingo Molnar	:	various cleanups and rewrites
  *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
+ *	Gerhard Wichert, Siemens AG :	Added BIGMEM support
  */
 
 #include
@@ -661,6 +662,15 @@
  * We are called very early to get the low memory for the
  * SMP bootup trampoline page.
  */
+#ifdef CONFIG_BIGMEM
+unsigned long __init smp_alloc_memory(unsigned long mem_base)
+{
+	if (mem_base >= 0x9F000)
+		panic("smp_alloc_memory: Insufficient low memory for kernel trampoline 0x%lx.", mem_base);
+	trampoline_base = (void *)(mem_base + PAGE_OFFSET);
+	return mem_base + PAGE_SIZE;
+}
+#else
 unsigned long __init smp_alloc_memory(unsigned long mem_base)
 {
 	if (virt_to_phys((void *)mem_base) >= 0x9F000)
@@ -668,6 +678,7 @@
 	trampoline_base = (void *)mem_base;
 	return mem_base + PAGE_SIZE;
 }
+#endif
 
 /*
  * The bootstrap kernel entry code has set these up. Save them for
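For reference, the MAXMEM/MAXBIGMEM arithmetic in the setup.c hunk above works out as follows. A standalone sketch, assuming the usual i386 PAGE_OFFSET of 0xC0000000 (3GB/1GB split); the 32-bit wraparound is modelled with uint32_t, and nothing here is taken from the patch beyond the two macro definitions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_offset = 0xC0000000u;	/* assumed 3GB/1GB split */
	uint32_t vmalloc_reserve = 64u << 20;	/* 64MB for vmalloc */

	/* MAXMEM = -PAGE_OFFSET - VMALLOC_RESERVE, in 32-bit arithmetic:
	 * 4GB - 3GB - 64MB = 960MB of directly mapped ("low") memory. */
	uint32_t maxmem = (0u - page_offset) - vmalloc_reserve;

	/* MAXBIGMEM = ~(VMALLOC_RESERVE-1) = 4GB - 64MB: physical memory
	 * between MAXMEM and this limit becomes BIGMEM. */
	uint32_t maxbigmem = ~(vmalloc_reserve - 1);

	printf("MAXMEM    = %uMB\n", (unsigned) (maxmem >> 20));	/* 960 */
	printf("MAXBIGMEM = %uMB\n", (unsigned) (maxbigmem >> 20));	/* 4032 */
	return 0;
}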
diff -urN 2.2.10/arch/i386/mm/init.c 2.2.10-bigmem.orig/arch/i386/mm/init.c
--- 2.2.10/arch/i386/mm/init.c	Tue Jan 26 19:27:22 1999
+++ 2.2.10-bigmem.orig/arch/i386/mm/init.c	Thu Aug  5 13:04:32 1999
@@ -2,6 +2,8 @@
  * linux/arch/i386/mm/init.c
  *
  * Copyright (C) 1995  Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 #include
@@ -27,6 +29,9 @@
 #include
 #include
 #include
+#ifdef CONFIG_BIGMEM
+#include <asm/kmap.h>
+#endif
 
 extern void show_net_buffers(void);
 extern unsigned long init_smp_mappings(unsigned long);
@@ -148,13 +153,19 @@
 {
 	int i,free = 0,total = 0,reserved = 0;
 	int shared = 0, cached = 0;
-
+#ifdef CONFIG_BIGMEM
+	int bigmem = 0;
+#endif
 	printk("Mem-info:\n");
 	show_free_areas();
 	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
 		total++;
+#ifdef CONFIG_BIGMEM
+		if (PageBIGMEM(mem_map+i))
+			bigmem++;
+#endif
 		if (PageReserved(mem_map+i))
 			reserved++;
 		else if (PageSwapCache(mem_map+i))
@@ -165,6 +176,9 @@
 			shared += atomic_read(&mem_map[i].count) - 1;
 	}
 	printk("%d pages of RAM\n",total);
+#ifdef CONFIG_BIGMEM
+	printk("%d pages of BIGMEM\n",bigmem);
+#endif
 	printk("%d reserved pages\n",reserved);
 	printk("%d pages shared\n",shared);
 	printk("%d pages swap cached\n",cached);
@@ -229,6 +243,9 @@
 		pgd_val(*pg_dir) = _PAGE_TABLE | __pa(start_mem);
 		start_mem += PAGE_SIZE;
 	}
+#ifdef CONFIG_BIGMEM
+	kmap_init();
+#endif
 	return start_mem;
 }
@@ -382,6 +399,101 @@
 	printk(".\n");
 }
 
+#ifdef CONFIG_BIGMEM
+__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+{
+	extern unsigned long bigmem_start, bigmem_end;
+	unsigned long start_low_mem = PAGE_SIZE;
+	int codepages = 0;
+	int reservedpages = 0;
+	int datapages = 0;
+	int initpages = 0;
+	int bigpages = 0;
+	unsigned long tmp;
+
+	printk("Initializing Memory\n");
+	end_mem &= PAGE_MASK;
+	high_memory = (void *) end_mem;
+	max_mapnr = num_physpages = PHYSMAP_NR(bigmem_end);
+
+	/* clear the zero-page */
+	memset(empty_zero_page, 0, PAGE_SIZE);
+
+	/* mark usable pages in the mem_map[] */
+	start_low_mem = PAGE_ALIGN(start_low_mem);
+
+#ifdef __SMP__
+	/*
+	 * But first pinch a few for the stack/trampoline stuff
+	 * FIXME: Don't need the extra page at 4K, but need to fix
+	 * trampoline before removing it. (see the GDT stuff)
+	 *
+	 */
+	start_low_mem += PAGE_SIZE;				/* 32bit startup code */
+	start_low_mem = smp_alloc_memory(start_low_mem);	/* AP processor stacks */
+#endif
+	start_mem = PAGE_ALIGN(start_mem);
+
+	/*
+	 * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
+	 * They seem to have done something stupid with the floppy
+	 * controller as well..
+	 */
+	while (start_low_mem < 0x9f000) {
+		clear_bit(PG_reserved, &mem_map[PHYSMAP_NR(start_low_mem)].flags);
+		start_low_mem += PAGE_SIZE;
+	}
+
+	while (start_mem < end_mem) {
+		clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
+		start_mem += PAGE_SIZE;
+	}
+	for (tmp = PAGE_OFFSET ; tmp < end_mem ; tmp += PAGE_SIZE) {
+		if (tmp >= MAX_DMA_ADDRESS)
+			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+		if (PageReserved(mem_map+MAP_NR(tmp))) {
+			if (tmp >= (unsigned long) &_text && tmp < (unsigned long) &_edata) {
+				if (tmp < (unsigned long) &_etext)
+					codepages++;
+				else
+					datapages++;
+			} else if (tmp >= (unsigned long) &__init_begin
+				   && tmp < (unsigned long) &__init_end)
+				initpages++;
+			else if (tmp >= (unsigned long) &__bss_start
+				 && tmp < (unsigned long) start_mem)
+				datapages++;
+			else
+				reservedpages++;
+			continue;
+		}
+		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
+#ifdef CONFIG_BLK_DEV_INITRD
+		if (!initrd_start || (tmp < initrd_start || tmp >=
+		    initrd_end))
+#endif
+			free_page(tmp);
+	}
+	for (tmp = bigmem_start; tmp < bigmem_end; tmp += PAGE_SIZE) {
+		clear_bit(PG_reserved, &mem_map[PHYSMAP_NR(tmp)].flags);
+		set_bit(PG_BIGMEM, &mem_map[PHYSMAP_NR(tmp)].flags);
+		atomic_set(&mem_map[PHYSMAP_NR(tmp)].count, 1);
+		free_physpage(tmp);
+		bigpages++;
+	}
+	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %dk bigmem)\n",
+		(unsigned long) nr_free_pages << (PAGE_SHIFT-10),
+		max_mapnr << (PAGE_SHIFT-10),
+		codepages << (PAGE_SHIFT-10),
+		reservedpages << (PAGE_SHIFT-10),
+		datapages << (PAGE_SHIFT-10),
+		initpages << (PAGE_SHIFT-10),
+		bigpages << (PAGE_SHIFT-10));
+
+	if (boot_cpu_data.wp_works_ok < 0)
+		test_wp_bit();
+}
+#else
 __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 {
 	unsigned long start_low_mem = PAGE_SIZE;
@@ -464,6 +576,7 @@
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 }
+#endif
 
 void free_initmem(void)
 {
@@ -487,15 +600,26 @@
 	val->sharedram = 0;
 	val->freeram = nr_free_pages << PAGE_SHIFT;
 	val->bufferram = buffermem;
+#ifdef CONFIG_BIGMEM
+	val->totalbig = 0;
+	val->freebig = nr_free_bigmem << PAGE_SHIFT;
+#endif
 	while (i-- > 0) {
 		if (PageReserved(mem_map+i))
 			continue;
 		val->totalram++;
+#ifdef CONFIG_BIGMEM
+		if (i >= PHYSMAP_NR(bigmem_start))
+			val->totalbig++;
+#endif
 		if (!atomic_read(&mem_map[i].count))
 			continue;
 		val->sharedram += atomic_read(&mem_map[i].count) - 1;
 	}
 	val->totalram <<= PAGE_SHIFT;
 	val->sharedram <<= PAGE_SHIFT;
+#ifdef CONFIG_BIGMEM
+	val->totalbig <<= PAGE_SHIFT;
+#endif
 	return;
 }
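The mem_init() replacement above mixes two page-frame index macros, and the distinction matters: BIGMEM pages have no permanent kernel-virtual address, so only a physical-address index can reach their mem_map[] entries. A standalone illustration; PAGE_OFFSET and the example addresses are assumptions, while the two macros mirror the include/asm-i386/page.h hunk later in this patch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xC0000000u		/* assumed i386 default */
#define __pa(x)		((uint32_t)(x) - PAGE_OFFSET)

#define MAP_NR(addr)	 (__pa(addr) >> PAGE_SHIFT)	   /* kernel virtual -> mem_map index */
#define PHYSMAP_NR(addr) ((uint32_t)(addr) >> PAGE_SHIFT) /* physical      -> mem_map index */

int main(void)
{
	uint32_t virt = PAGE_OFFSET + 0x00100000u;	/* 1MB into the direct mapping */
	uint32_t phys = 0x48000000u;			/* 1152MB: above MAXMEM, i.e. BIGMEM */

	/* Both index the same mem_map[], which is now sized by physical
	 * page number (max_mapnr = PHYSMAP_NR(bigmem_end) above). */
	printf("MAP_NR(0x%08x)     = %u\n", virt, MAP_NR(virt));	/* 256 */
	printf("PHYSMAP_NR(0x%08x) = %u\n", phys, PHYSMAP_NR(phys));	/* 294912 */
	return 0;
}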
diff -urN 2.2.10/fs/proc/array.c 2.2.10-bigmem.orig/fs/proc/array.c
--- 2.2.10/fs/proc/array.c	Tue Jul 13 00:33:14 1999
+++ 2.2.10-bigmem.orig/fs/proc/array.c	Thu Jul 22 21:52:56 1999
@@ -42,6 +42,8 @@
  * Alan Cox	    :  security fixes.
  *
  *
+ * Gerhard Wichert  :  added BIGMEM support
+ *                     Siemens AG
  */
 
 #include
@@ -354,6 +356,27 @@
 	 * Tagged format, for easy grepping and expansion. The above will go away
 	 * eventually, once the tools have been updated.
 	 */
+#ifdef CONFIG_BIGMEM
+	return len + sprintf(buffer+len,
+		"MemTotal:    %8lu kB\n"
+		"MemFree:     %8lu kB\n"
+		"MemShared:   %8lu kB\n"
+		"Buffers:     %8lu kB\n"
+		"Cached:      %8lu kB\n"
+		"BigMemTotal: %8lu kB\n"
+		"BigMemFree:  %8lu kB\n"
+		"SwapTotal:   %8lu kB\n"
+		"SwapFree:    %8lu kB\n",
+		i.totalram >> 10,
+		i.freeram >> 10,
+		i.sharedram >> 10,
+		i.bufferram >> 10,
+		page_cache_size << (PAGE_SHIFT - 10),
+		i.totalbig >> 10,
+		i.freebig >> 10,
+		i.totalswap >> 10,
+		i.freeswap >> 10);
+#else
 	return len + sprintf(buffer+len,
 		"MemTotal:  %8lu kB\n"
 		"MemFree:   %8lu kB\n"
@@ -369,6 +392,7 @@
 		page_cache_size << (PAGE_SHIFT - 10),
 		i.totalswap >> 10,
 		i.freeswap >> 10);
+#endif
 }
 
 static int get_version(char * buffer)
@@ -419,9 +443,51 @@
 	pte = *pte_offset(page_middle,ptr);
 	if (!pte_present(pte))
 		return 0;
+#ifdef CONFIG_BIGMEM
+	return pte_physpage(pte) + (ptr & ~PAGE_MASK);
+#else
 	return pte_page(pte) + (ptr & ~PAGE_MASK);
+#endif
 }
 
+#ifdef CONFIG_BIGMEM
+#include <asm/kmap.h>
+
+static int get_array(struct task_struct *p, unsigned long start, unsigned long end, char * buffer)
+{
+	unsigned long addr;
+	int size = 0, result = 0;
+	char c;
+
+	if (start >= end)
+		return result;
+	for (;;) {
+		addr = get_phys_addr(p, start);
+		if (!addr)
+			return result;
+		addr = kmap(addr, KM_READ);
+		do {
+			c = *(char *) addr;
+			if (!c)
+				result = size;
+			if (size < PAGE_SIZE)
+				buffer[size++] = c;
+			else {
+				kunmap(addr, KM_READ);
+				return result;
+			}
+			addr++;
+			start++;
+			if (!c && start >= end) {
+				kunmap(addr, KM_READ);
+				return result;
+			}
+		} while (addr & ~PAGE_MASK);
+		kunmap(addr, KM_READ);
+	}
+	return result;
+}
+#else
 static int get_array(struct task_struct *p, unsigned long start, unsigned long end, char * buffer)
 {
 	unsigned long addr;
@@ -450,6 +516,7 @@
 	}
 	return result;
 }
+#endif
 
 static int get_env(int pid, char * buffer)
 {
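With the fs/proc/array.c hunk above applied and CONFIG_BIGMEM enabled, the tagged section of /proc/meminfo gains two lines. Illustrative output only; every number here is invented:

MemTotal:     1048576 kB
MemFree:       901232 kB
MemShared:      12648 kB
Buffers:         4096 kB
Cached:         16384 kB
BigMemTotal:    65536 kB
BigMemFree:     61440 kB
SwapTotal:     131072 kB
SwapFree:      131072 kB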
diff -urN 2.2.10/include/asm-i386/fixmap.h 2.2.10-bigmem.orig/include/asm-i386/fixmap.h
--- 2.2.10/include/asm-i386/fixmap.h	Tue Aug  3 00:51:19 1999
+++ 2.2.10-bigmem.orig/include/asm-i386/fixmap.h	Mon Aug  2 10:17:09 1999
@@ -6,6 +6,8 @@
  * for more details.
  *
  * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 #ifndef _ASM_FIXMAP_H
@@ -14,6 +16,9 @@
 #include
 #include
 #include
+#ifdef CONFIG_BIGMEM
+#include
+#endif
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -52,6 +57,10 @@
 	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */
 	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
 	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_BIGMEM
+	FIX_KMAP_START,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_START+(4*NR_CPUS),
 #endif
 	__end_of_fixed_addresses
 };
diff -urN 2.2.10/include/asm-i386/io.h 2.2.10-bigmem.orig/include/asm-i386/io.h
--- 2.2.10/include/asm-i386/io.h	Tue Aug  3 00:51:21 1999
+++ 2.2.10-bigmem.orig/include/asm-i386/io.h	Mon Aug  2 10:17:10 1999
@@ -27,6 +27,7 @@
 
 /*
  * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
  */
 
 #ifdef SLOW_IO_BY_JUMPING
@@ -101,8 +102,14 @@
 #include
 #include
 
+#ifdef CONFIG_BIGMEM
+#define __io_virt(x) __va(x)
+#define __io_phys(x) __pa(x)
+#else
 #define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x)))
 #define __io_phys(x) ((unsigned long)(x) & ~PAGE_OFFSET)
+#endif
+
 /*
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
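How the FIX_KMAP_START..FIX_KMAP_END range above turns into virtual addresses: fixmap slots grow downward, one page per enum value. A standalone sketch; FIXADDR_TOP and the __fix_to_virt() formula are assumptions taken from the 2.2 fixmap.h, and the enum position of FIX_KMAP_START is invented:

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xffffe000u	/* assumed from 2.2 fixmap.h */
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))

int main(void)
{
	unsigned fix_kmap_start = 6;	/* hypothetical enum position */
	unsigned km_type_max = 4, type = 1 /* KM_WRITE */, cpu = 3;

	/* per-CPU slot index, as kmap() computes it below: */
	unsigned idx = type + km_type_max * cpu;

	printf("CPU3 KM_WRITE slot = 0x%08x\n",
	       __fix_to_virt(fix_kmap_start + idx));
	return 0;
}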
diff -urN 2.2.10/include/asm-i386/kmap.h 2.2.10-bigmem.orig/include/asm-i386/kmap.h
--- 2.2.10/include/asm-i386/kmap.h	Thu Jan  1 01:00:00 1970
+++ 2.2.10-bigmem.orig/include/asm-i386/kmap.h	Thu Aug  5 13:04:32 1999
@@ -0,0 +1,67 @@
+/*
+ * kmap.h: virtual kernel memory mappings for big memory
+ *
+ * Used in CONFIG_BIGMEM systems for memory pages which are not
+ * addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ */
+
+#ifndef _ASM_KMAP_H
+#define _ASM_KMAP_H
+
+#include <asm/fixmap.h>
+
+enum km_type {
+	KM_READ,
+	KM_WRITE,
+	KM_RDINTR,
+	KM_WRINTR,
+	KM_TYPE_MAX
+};
+
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+extern inline void kmap_init(void)
+{
+	unsigned long kmap_vstart = __fix_to_virt(FIX_KMAP_START);
+
+	kmap_pte = pte_offset(pmd_offset(pgd_offset_k(kmap_vstart), kmap_vstart), kmap_vstart);
+	kmap_prot = PAGE_KERNEL;
+	if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+		pgprot_val(kmap_prot) |= _PAGE_GLOBAL;
+}
+
+extern inline unsigned long kmap(unsigned long kaddr, enum km_type type)
+{
+	if (__pa(kaddr) < bigmem_start)
+		return kaddr;
+	else {
+#ifdef __SMP__
+		enum fixed_addresses idx = type + KM_TYPE_MAX * smp_processor_id();
+#else
+		enum fixed_addresses idx = type;
+#endif
+		set_pte(kmap_pte-idx, mk_pte(kaddr & PAGE_MASK, kmap_prot));
+		__flush_tlb_one(__fix_to_virt(FIX_KMAP_START+idx));
+		return __fix_to_virt(FIX_KMAP_START+idx) | (kaddr & (PAGE_SIZE-1));
+	}
+}
+
+extern inline void kunmap(unsigned long vaddr, enum km_type type)
+{
+#ifdef __SMP__
+	enum fixed_addresses idx = type + KM_TYPE_MAX * smp_processor_id();
+#else
+	enum fixed_addresses idx = type;
+#endif
+	if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_START+idx)) {
+		pte_clear(kmap_pte-idx);
+		__flush_tlb_one(vaddr);
+	}
+}
+
+#endif
+
diff -urN 2.2.10/include/asm-i386/page.h 2.2.10-bigmem.orig/include/asm-i386/page.h
--- 2.2.10/include/asm-i386/page.h	Fri Jan 29 15:51:18 1999
+++ 2.2.10-bigmem.orig/include/asm-i386/page.h	Mon Aug  2 10:17:08 1999
@@ -85,6 +85,9 @@
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define MAP_NR(addr)		(__pa(addr) >> PAGE_SHIFT)
+#ifdef CONFIG_BIGMEM
+#define PHYSMAP_NR(addr)	((unsigned long)(addr) >> PAGE_SHIFT)
+#endif
 
 #endif /* __KERNEL__ */
 
diff -urN 2.2.10/include/asm-i386/pgtable.h 2.2.10-bigmem.orig/include/asm-i386/pgtable.h
--- 2.2.10/include/asm-i386/pgtable.h	Tue Aug  3 00:51:20 1999
+++ 2.2.10-bigmem.orig/include/asm-i386/pgtable.h	Mon Aug  2 10:17:10 1999
@@ -368,6 +368,11 @@
 #define pte_page(pte) \
 ((unsigned long) __va(pte_val(pte) & PAGE_MASK))
 
+#ifdef CONFIG_BIGMEM
+#define pte_physpage(pte) \
+((unsigned long)(pte_val(pte) & PAGE_MASK))
+#endif
+
 #define pmd_page(pmd) \
 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
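A minimal usage sketch of the kmap()/kunmap() pair defined above: zeroing a page that may have come from BIGMEM and therefore has no permanent kernel mapping. This mirrors the do_anonymous_page() change later in this patch; kernel context is assumed (GFP_BIGUSER is defined in the include/linux/mm.h hunk below), so it is not standalone:

	unsigned long kvaddr;
	unsigned long page = __get_free_page(GFP_BIGUSER);

	if (page) {
		/* an ordinary page passes through kmap() unchanged; a
		 * BIGMEM page is mapped through this CPU's KM_WRITE
		 * fixmap pte until kunmap() tears it down again */
		kvaddr = kmap(page, KM_WRITE);
		clear_page(kvaddr);
		kunmap(kvaddr, KM_WRITE);
	}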
diff -urN 2.2.10/include/linux/kernel.h 2.2.10-bigmem.orig/include/linux/kernel.h
--- 2.2.10/include/linux/kernel.h	Wed Jul 21 22:15:25 1999
+++ 2.2.10-bigmem.orig/include/linux/kernel.h	Mon Aug  2 10:17:08 1999
@@ -86,10 +86,19 @@
 	unsigned long freeram;		/* Available memory size */
 	unsigned long sharedram;	/* Amount of shared memory */
 	unsigned long bufferram;	/* Memory used by buffers */
+#ifdef CONFIG_BIGMEM
+	unsigned long totalbig;		/* Total big memory size */
+	unsigned long freebig;		/* Available big memory size */
+	unsigned long totalswap;	/* Total swap space size */
+	unsigned long freeswap;		/* swap space still available */
+	unsigned short procs;		/* Number of current processes */
+	char _f[14];			/* Pads structure to 64 bytes */
+#else
 	unsigned long totalswap;	/* Total swap space size */
 	unsigned long freeswap;		/* swap space still available */
 	unsigned short procs;		/* Number of current processes */
 	char _f[22];			/* Pads structure to 64 bytes */
+#endif
 };
 
 #endif
diff -urN 2.2.10/include/linux/mm.h 2.2.10-bigmem.orig/include/linux/mm.h
--- 2.2.10/include/linux/mm.h	Tue Aug  3 00:51:20 1999
+++ 2.2.10-bigmem.orig/include/linux/mm.h	Mon Aug  2 10:17:10 1999
@@ -12,6 +12,10 @@
 extern unsigned long num_physpages;
 extern void * high_memory;
 extern int page_cluster;
+#ifdef CONFIG_BIGMEM
+extern unsigned long bigmem_start;
+extern unsigned long bigmem_end;
+#endif
 
 #include
 #include
@@ -144,6 +148,9 @@
 #define PG_Slab			 9
 #define PG_swap_cache		10
 #define PG_skip			11
+#ifdef CONFIG_BIGMEM
+#define PG_BIGMEM		12
+#endif
 #define PG_reserved		31
 
 /* Make it prettier to test the above... */
@@ -158,6 +165,9 @@
 #define PageDMA(page)		(test_bit(PG_DMA, &(page)->flags))
 #define PageSlab(page)		(test_bit(PG_Slab, &(page)->flags))
 #define PageSwapCache(page)	(test_bit(PG_swap_cache, &(page)->flags))
+#ifdef CONFIG_BIGMEM
+#define PageBIGMEM(page)	(test_bit(PG_BIGMEM, &(page)->flags))
+#endif
 #define PageReserved(page)	(test_bit(PG_reserved, &(page)->flags))
 
 #define PageSetSlab(page)	(set_bit(PG_Slab, &(page)->flags))
@@ -276,7 +286,10 @@
 #define free_page(addr) free_pages((addr),0)
 extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
 extern void FASTCALL(__free_page(struct page *));
-
+#ifdef CONFIG_BIGMEM
+#define free_physpage(addr) free_physpages((addr),0)
+extern void FASTCALL(free_physpages(unsigned long addr, unsigned long order));
+#endif
 extern void show_free_areas(void);
 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
 	unsigned long address);
@@ -333,10 +346,17 @@
 #define __GFP_IO	0x10
 #define __GFP_SWAP	0x20
+#ifdef CONFIG_BIGMEM
+#define __GFP_BIGMEM	0x40
+#endif
+
 #define __GFP_DMA	0x80
 
 #define GFP_BUFFER	(__GFP_LOW | __GFP_WAIT)
 #define GFP_ATOMIC	(__GFP_HIGH)
+#ifdef CONFIG_BIGMEM
+#define GFP_BIGUSER	(__GFP_LOW | __GFP_WAIT | __GFP_IO | __GFP_BIGMEM)
+#endif
 #define GFP_USER	(__GFP_LOW | __GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL	(__GFP_MED | __GFP_WAIT | __GFP_IO)
 #define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
@@ -346,6 +366,13 @@
    platforms, used as appropriate on others */
 
 #define GFP_DMA		__GFP_DMA
+
+#ifdef CONFIG_BIGMEM
+/* Flag - indicates that the buffer can be taken from big memory which is not
+   directly addressable by the kernel */
+
+#define GFP_BIGMEM	__GFP_BIGMEM
+#endif
 
 /* vma is the first one with  address < vma->vm_end,
  * and even  address < vma->vm_start. Have to extend vma. */
diff -urN 2.2.10/include/linux/swap.h 2.2.10-bigmem.orig/include/linux/swap.h
--- 2.2.10/include/linux/swap.h	Tue Aug  3 00:51:20 1999
+++ 2.2.10-bigmem.orig/include/linux/swap.h	Mon Aug  2 10:17:09 1999
@@ -65,6 +65,11 @@
 extern int nr_swap_pages;
 extern int nr_free_pages;
+
+#ifdef CONFIG_BIGMEM
+extern int nr_free_bigmem;
+#endif
+
 extern atomic_t nr_async_pages;
 extern struct inode swapper_inode;
 extern unsigned long page_cache_size;
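The struct sysinfo hunk above keeps the structure at 64 bytes: the two new unsigned longs (totalbig, freebig) cost 8 bytes on i386, so the trailing pad shrinks from 22 to 14 bytes. A quick standalone check; the leading uptime/loads fields are assumed from the unpatched header, and on an LP64 host you would build with -m32 to reproduce the 32-bit layout:

#include <stdio.h>

struct sysinfo_bigmem {			/* hypothetical mirror of the patched layout */
	long uptime;
	unsigned long loads[3];
	unsigned long totalram, freeram, sharedram, bufferram;
	unsigned long totalbig, freebig;
	unsigned long totalswap, freeswap;
	unsigned short procs;
	char _f[14];
};

int main(void)
{
	/* 4 + 12 + 8*4 + 2 + 14 = 64 bytes on a 32-bit target */
	printf("sizeof(struct sysinfo_bigmem) = %u\n",
	       (unsigned) sizeof(struct sysinfo_bigmem));
	return 0;
}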
diff -urN 2.2.10/mm/filemap.c 2.2.10-bigmem.orig/mm/filemap.c
--- 2.2.10/mm/filemap.c	Tue Jul 13 00:33:10 1999
+++ 2.2.10-bigmem.orig/mm/filemap.c	Thu Aug  5 13:05:01 1999
@@ -24,11 +24,17 @@
 #include
 #include
 
+#ifdef CONFIG_BIGMEM
+#include <asm/kmap.h>
+#endif
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
  *
  * Shared mappings now work. 15.8.1995  Bruno.
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 unsigned long page_cache_size = 0;
@@ -955,11 +961,13 @@
 	 * that it's up-to-date. First check whether we'll need an
 	 * extra page -- better to overlap the allocation with the I/O.
 	 */
+#ifndef CONFIG_BIGMEM
 	if (no_share && !new_page) {
 		new_page = page_cache_alloc();
 		if (!new_page)
 			goto failure;
 	}
+#endif
 
 	if (PageLocked(page))
 		goto page_locked_wait;
@@ -987,6 +995,17 @@
 	/*
 	 * No sharing ... copy to the new page.
 	 */
+#ifdef CONFIG_BIGMEM
+	if (!new_page) {
+		unsigned long kvaddr;
+		new_page = __get_free_page(GFP_BIGUSER);
+		if (!new_page)
+			goto no_page;
+		kvaddr = kmap(new_page, KM_READ);
+		copy_page(kvaddr, old_page);
+		kunmap(kvaddr, KM_READ);
+	} else
+#endif
 	copy_page(new_page, old_page);
 	flush_page_to_ram(new_page);
 	page_cache_release(page);
diff -urN 2.2.10/mm/memory.c 2.2.10-bigmem.orig/mm/memory.c
--- 2.2.10/mm/memory.c	Tue Jul 13 00:33:23 1999
+++ 2.2.10-bigmem.orig/mm/memory.c	Thu Jul 22 21:56:22 1999
@@ -31,6 +31,9 @@
 /*
  * 05.04.94  -  Multi-page memory management added for v1.1.
  *		Idea by Alex Bligh (alex@cconcepts.co.uk)
+ *
+ * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
+ *		(Gerhard.Wichert@pdb.siemens.de)
  */
 
 #include
@@ -613,6 +616,9 @@
  * change only once the write actually happens. This avoids a few races,
  * and potentially makes it more efficient.
  */
+#ifdef CONFIG_BIGMEM
+#include <asm/kmap.h>
+#endif
 static int do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
 	unsigned long address, pte_t *page_table)
 {
@@ -621,7 +627,11 @@
 	struct page * page_map;
 
 	pte = *page_table;
+#ifdef CONFIG_BIGMEM
+	new_page = __get_free_page(GFP_BIGUSER);
+#else
 	new_page = __get_free_page(GFP_USER);
+#endif
 
 	/* Did swap_out() unmapped the protected page while we slept? */
 	if (pte_val(*page_table) != pte_val(pte))
 		goto end_wp_page;
@@ -673,7 +683,15 @@
 	if (PageReserved(page_map))
 		++vma->vm_mm->rss;
+#ifdef CONFIG_BIGMEM
+	{
+		unsigned long kaddr = kmap(new_page, KM_WRITE);
+		copy_cow_page((address&PAGE_MASK), kaddr);
+		kunmap(kaddr, KM_WRITE);
+	}
+#else
 	copy_cow_page(old_page,new_page);
+#endif
 	flush_page_to_ram(old_page);
 	flush_page_to_ram(new_page);
 	flush_cache_page(vma, address);
@@ -815,10 +833,20 @@
 {
 	pte_t entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
 	if (write_access) {
+#ifdef CONFIG_BIGMEM
+		unsigned long kaddr;
+		unsigned long page = __get_free_page(GFP_BIGUSER);
+		if (!page)
+			return 0;
+		kaddr = kmap(page, KM_WRITE);
+		clear_page(kaddr);
+		kunmap(kaddr, KM_WRITE);
+#else
 		unsigned long page = __get_free_page(GFP_USER);
 		if (!page)
 			return 0;
 		clear_page(page);
+#endif
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 		vma->vm_mm->rss++;
 		tsk->min_flt++;
diff -urN 2.2.10/mm/page_alloc.c 2.2.10-bigmem.orig/mm/page_alloc.c
--- 2.2.10/mm/page_alloc.c	Tue Jul 13 00:33:10 1999
+++ 2.2.10-bigmem.orig/mm/page_alloc.c	Thu Jul 22 21:52:56 1999
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *  Swap reorganised 29.12.95, Stephen Tweedie
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 #include
@@ -21,6 +22,12 @@
 int nr_swap_pages = 0;
 int nr_free_pages = 0;
 
+#ifdef CONFIG_BIGMEM
+unsigned long bigmem_start = 0;
+unsigned long bigmem_end = 0;
+int nr_free_bigmem = 0;
+#endif
+
 /*
  * Free area management
  *
@@ -45,7 +52,12 @@
 
 #define memory_head(x) ((struct page *)(x))
 
+#ifdef CONFIG_BIGMEM
+#define BIGMEM_LISTS_OFFSET NR_MEM_LISTS
+static struct free_area_struct free_area[NR_MEM_LISTS*2];
+#else
 static struct free_area_struct free_area[NR_MEM_LISTS];
+#endif
 
 static inline void init_mem_queue(struct free_area_struct * head)
 {
@@ -101,6 +113,12 @@
 
 #define list(x) (mem_map+(x))
 
+#ifdef CONFIG_BIGMEM
+	if (map_nr >= PHYSMAP_NR(bigmem_start)) {
+		area += BIGMEM_LISTS_OFFSET;
+		nr_free_bigmem -= mask;
+	}
+#endif
 	map_nr &= mask;
 	nr_free_pages -= mask;
 	while (mask + (1 << (NR_MEM_LISTS-1))) {
@@ -130,6 +148,26 @@
 	}
 }
 
+#ifdef CONFIG_BIGMEM
+void free_physpages(unsigned long addr, unsigned long order)
+{
+	unsigned long map_nr = PHYSMAP_NR(addr);
+
+	if (map_nr < max_mapnr) {
+		mem_map_t * map = mem_map + map_nr;
+		if (PageReserved(map))
+			return;
+		if (atomic_dec_and_test(&map->count)) {
+			if (PageSwapCache(map))
+				panic ("Freeing swap cache pages");
+			map->flags &= ~(1 << PG_referenced);
+			free_pages_ok(map_nr, order);
+			return;
+		}
+	}
+}
+#endif
+
 void free_pages(unsigned long addr, unsigned long order)
 {
 	unsigned long map_nr = MAP_NR(addr);
@@ -155,6 +193,47 @@
 		change_bit((index) >> (1+(order)), (area)->map)
 #define CAN_DMA(x) (PageDMA(x))
 #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
+#ifdef CONFIG_BIGMEM
+#define RMQUEUE(order, gfp_mask) \
+do { struct free_area_struct * area = free_area+BIGMEM_LISTS_OFFSET+order; \
+     unsigned long new_order = order; \
+	if (gfp_mask & __GFP_BIGMEM) { \
+		do { struct page *prev = memory_head(area), *ret = prev->next; \
+			if (memory_head(area) != ret) { \
+				unsigned long map_nr; \
+				(prev->next = ret->next)->prev = prev; \
+				map_nr = ret - mem_map; \
+				MARK_USED(map_nr, new_order, area); \
+				nr_free_pages -= 1 << order; \
+				nr_free_bigmem -= 1 << order; \
+				EXPAND(ret, map_nr, order, new_order, area); \
+				spin_unlock_irqrestore(&page_alloc_lock, flags); \
+				return ADDRESS(map_nr); \
+			} \
+			new_order++;	area++; \
+		} while (new_order < NR_MEM_LISTS); \
+		new_order = order; \
+	} \
+	area = free_area+order; \
+	do { struct page *prev = memory_head(area), *ret = prev->next; \
+		while (memory_head(area) != ret) { \
+			if (!(gfp_mask & __GFP_DMA) || CAN_DMA(ret)) { \
+				unsigned long map_nr; \
+				(prev->next = ret->next)->prev = prev; \
+				map_nr = ret - mem_map; \
+				MARK_USED(map_nr, new_order, area); \
+				nr_free_pages -= 1 << order; \
+				EXPAND(ret, map_nr, order, new_order, area); \
+				spin_unlock_irqrestore(&page_alloc_lock, flags); \
+				return ADDRESS(map_nr); \
+			} \
+			prev = ret; \
+			ret = ret->next; \
+		} \
+		new_order++;	area++; \
+	} while (new_order < NR_MEM_LISTS); \
+} while (0)
+#else
 #define RMQUEUE(order, gfp_mask) \
 do { struct free_area_struct * area = free_area+order; \
      unsigned long new_order = order; \
@@ -176,7 +255,7 @@
 		new_order++;	area++; \
 	} while (new_order < NR_MEM_LISTS); \
 } while (0)
-
+#endif
 
 #define EXPAND(map,index,low,high,area) \
 do { unsigned long size = 1 << high; \
 	while (high > low) { \
@@ -263,7 +342,13 @@
 	unsigned long order, flags;
 	unsigned long total = 0;
 
+#ifdef CONFIG_BIGMEM
+	printk("Free pages: %6dkB (%6dkB BigMem)\n ( ",
+		nr_free_pages<<(PAGE_SHIFT-10),
+		nr_free_bigmem<<(PAGE_SHIFT-10));
+#else
 	printk("Free pages: %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
+#endif
 	printk("Free: %d (%d %d %d)\n",
 		nr_free_pages,
 		freepages.min,
@@ -276,6 +361,13 @@
 		for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) {
 			nr ++;
 		}
+#ifdef CONFIG_BIGMEM
+		for (tmp = free_area[BIGMEM_LISTS_OFFSET+order].next;
+		     tmp != memory_head(free_area+BIGMEM_LISTS_OFFSET+order);
+		     tmp = tmp->next) {
+			nr ++;
+		}
+#endif
 		total += nr * ((PAGE_SIZE>>10) << order);
 		printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order));
 	}
@@ -300,6 +392,8 @@
 	unsigned long mask = PAGE_MASK;
 	unsigned long i;
 
+	end_mem -= PAGE_OFFSET;
+
 	/*
 	 * Select nr of pages we try to keep free for important stuff
 	 * with a minimum of 10 pages and a maximum of 256 pages, so
 	 * This is fairly arbitrary, but based on some behaviour
 	 * analysis.
 	 */
-	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
+	i = end_mem >> (PAGE_SHIFT+7);
 	if (i < 10)
 		i = 10;
 	if (i > 256)
@@ -316,7 +410,10 @@
 	freepages.low = i * 2;
 	freepages.high = i * 3;
 	mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
-	p = mem_map + MAP_NR(end_mem);
+#ifdef CONFIG_BIGMEM
+	end_mem = bigmem_end;
+#endif
+	p = mem_map + (end_mem >> PAGE_SHIFT);
 	start_mem = LONG_ALIGN((unsigned long) p);
 	memset(mem_map, 0, start_mem - (unsigned long) mem_map);
 	do {
@@ -328,14 +425,22 @@
 	for (i = 0 ; i < NR_MEM_LISTS ; i++) {
 		unsigned long bitmap_size;
 		init_mem_queue(free_area+i);
+#ifdef CONFIG_BIGMEM
+		init_mem_queue(free_area+BIGMEM_LISTS_OFFSET+i);
+#endif
 		mask += mask;
 		end_mem = (end_mem + ~mask) & mask;
-		bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
+		bitmap_size = end_mem >> (PAGE_SHIFT + i);
 		bitmap_size = (bitmap_size + 7) >> 3;
 		bitmap_size = LONG_ALIGN(bitmap_size);
 		free_area[i].map = (unsigned int *) start_mem;
 		memset((void *) start_mem, 0, bitmap_size);
 		start_mem += bitmap_size;
+#ifdef CONFIG_BIGMEM
+		free_area[BIGMEM_LISTS_OFFSET+i].map = (unsigned int *) start_mem;
+		memset((void *) start_mem, 0, bitmap_size);
+		start_mem += bitmap_size;
+#endif
 	}
 	return start_mem;
 }
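The doubled free_area[] and the BIGMEM branch of RMQUEUE above implement a simple policy: a __GFP_BIGMEM request searches the second set of buddy lists (offset by NR_MEM_LISTS) first and falls back to the normal lists, while any other request never touches the BIGMEM lists. A standalone toy model of just that search order; the list contents are invented and the DMA check of the real macro is omitted:

#include <stdio.h>

#define NR_MEM_LISTS		10
#define BIGMEM_LISTS_OFFSET	NR_MEM_LISTS
#define __GFP_BIGMEM		0x40

/* toy stand-in for the buddy lists: nonzero = free block available
 * (indices 0..9 normal, 10..19 BIGMEM) */
static int list_nonempty[NR_MEM_LISTS * 2];

/* returns the free_area index the request would be satisfied from, or -1 */
static int rmqueue_order(int order, int gfp_mask)
{
	int i;

	if (gfp_mask & __GFP_BIGMEM)
		for (i = order; i < NR_MEM_LISTS; i++)
			if (list_nonempty[BIGMEM_LISTS_OFFSET + i])
				return BIGMEM_LISTS_OFFSET + i;	/* needs kmap() to touch */
	for (i = order; i < NR_MEM_LISTS; i++)
		if (list_nonempty[i])
			return i;				/* directly mapped */
	return -1;
}

int main(void)
{
	list_nonempty[3] = 1;				/* a normal order-3 block */
	list_nonempty[BIGMEM_LISTS_OFFSET + 0] = 1;	/* a BIGMEM order-0 page */

	printf("order 0 with __GFP_BIGMEM -> list %d\n", rmqueue_order(0, __GFP_BIGMEM));	/* 10 */
	printf("order 0 without           -> list %d\n", rmqueue_order(0, 0));			/* 3 */
	return 0;
}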
diff -urN 2.2.10/mm/vmalloc.c 2.2.10-bigmem.orig/mm/vmalloc.c
--- 2.2.10/mm/vmalloc.c	Tue Jul 13 00:33:04 1999
+++ 2.2.10-bigmem.orig/mm/vmalloc.c	Thu Jul 22 21:52:56 1999
@@ -2,6 +2,7 @@
  *  linux/mm/vmalloc.c
  *
  *  Copyright (C) 1993  Linus Torvalds
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  */
 
 #include
@@ -94,7 +95,11 @@
 		unsigned long page;
 		if (!pte_none(*pte))
 			printk("alloc_area_pte: page already exists\n");
+#ifdef CONFIG_BIGMEM
+		page = __get_free_page(GFP_KERNEL|GFP_BIGMEM);
+#else
 		page = __get_free_page(GFP_KERNEL);
+#endif
 		if (!page)
 			return -ENOMEM;
 		set_pte(pte, mk_pte(page, PAGE_KERNEL));