diff -u --recursive --new-file v1.3.55/linux/Makefile linux/Makefile
--- v1.3.55/linux/Makefile	Sat Jan 6 19:10:38 1996
+++ linux/Makefile	Sat Jan 6 19:11:09 1996
@@ -1,6 +1,6 @@
 VERSION = 1
 PATCHLEVEL = 3
-SUBLEVEL = 55
+SUBLEVEL = 56
 
 ARCH = i386
 
diff -u --recursive --new-file v1.3.55/linux/arch/alpha/kernel/process.c linux/arch/alpha/kernel/process.c
--- v1.3.55/linux/arch/alpha/kernel/process.c	Tue Nov 21 13:22:05 1995
+++ linux/arch/alpha/kernel/process.c	Mon Jan 8 08:14:53 1996
@@ -140,6 +140,7 @@
 	p->tss.usp = usp;
 	p->tss.ksp = (unsigned long) childstack;
 	p->tss.flags = 1;
+	p->mm->context = 0;
 }
 
 /*
diff -u --recursive --new-file v1.3.55/linux/arch/alpha/kernel/signal.c linux/arch/alpha/kernel/signal.c
--- v1.3.55/linux/arch/alpha/kernel/signal.c	Wed Sep 13 12:45:29 1995
+++ linux/arch/alpha/kernel/signal.c	Mon Jan 8 08:14:54 1996
@@ -23,7 +23,6 @@
 
 asmlinkage void ret_from_sys_call(void);
 asmlinkage int do_signal(unsigned long, struct pt_regs *, struct switch_stack *, unsigned long, unsigned long);
-asmlinkage void imb(void);
 
 extern int ptrace_set_bpt (struct task_struct *child);
 extern int ptrace_cancel_bpt (struct task_struct *child);
diff -u --recursive --new-file v1.3.55/linux/fs/buffer.c linux/fs/buffer.c
--- v1.3.55/linux/fs/buffer.c	Sat Jan 6 19:10:40 1996
+++ linux/fs/buffer.c	Mon Jan 8 08:14:54 1996
@@ -1099,7 +1099,7 @@
  */
 int generic_readpage(struct inode * inode, struct page * page)
 {
-	unsigned long block;
+	unsigned long block, address;
 	int *p, nr[PAGE_SIZE/512];
 	int i;
 
@@ -1113,10 +1113,20 @@
 		p++;
 	} while (i > 0);
 
-	/* We should make this asynchronous, but this is good enough for now.. */
-	bread_page(page_address(page), inode->i_dev, nr, inode->i_sb->s_blocksize);
+	/*
+	 * We should make this asynchronous, but this is good enough for now..
+	 */
+
+	/* IO start */
+	page->count++;
+	address = page_address(page);
+	bread_page(address, inode->i_dev, nr, inode->i_sb->s_blocksize);
+
+	/* IO ready (this part should be in the "page ready callback" function) */
 	page->uptodate = 1;
 	wake_up(&page->wait);
+	free_page(address);
+
 	return 0;
 }
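
Aside (illustration, not part of the patch): the buffer.c hunk deliberately splits generic_readpage() into an "IO start" half and an "IO ready" half, so that the latter can eventually run from a completion callback once bread_page() is made asynchronous. A minimal sketch of that future callback — the name is hypothetical:

	/* hypothetical completion handler; does not exist in this kernel */
	static void readpage_done(struct page * page)
	{
		page->uptodate = 1;
		wake_up(&page->wait);		/* readers sleeping in fill_page() */
		free_page(page_address(page));	/* drop the count taken at IO start */
	}
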
diff -u --recursive --new-file v1.3.55/linux/fs/msdos/mmap.c linux/fs/msdos/mmap.c
--- v1.3.55/linux/fs/msdos/mmap.c	Thu Nov 9 11:23:51 1995
+++ linux/fs/msdos/mmap.c	Sun Jan 7 18:27:06 1996
@@ -27,14 +27,17 @@
 static unsigned long msdos_file_mmap_nopage(
 	struct vm_area_struct * area,
 	unsigned long address,
-	unsigned long page,
 	int error_code)
 {
 	struct inode * inode = area->vm_inode;
+	unsigned long page;
 	unsigned int clear;
 	int pos;
 	long gap;	/* distance from eof to pos */
 
+	page = __get_free_page(GFP_KERNEL);
+	if (!page)
+		return page;
 	address &= PAGE_MASK;
 	pos = address - area->vm_start + area->vm_offset;
 
diff -u --recursive --new-file v1.3.55/linux/fs/namei.c linux/fs/namei.c
--- v1.3.55/linux/fs/namei.c	Sat Dec 30 15:50:54 1995
+++ linux/fs/namei.c	Mon Jan 8 08:10:25 1996
@@ -757,7 +757,7 @@
 	char * to;
 	struct inode * oldinode;
 
-	error = namei(oldname, &oldinode);
+	error = lnamei(oldname, &oldinode);
 	if (error)
 		return error;
 	error = getname(newname,&to);
diff -u --recursive --new-file v1.3.55/linux/fs/ncpfs/mmap.c linux/fs/ncpfs/mmap.c
--- v1.3.55/linux/fs/ncpfs/mmap.c	Tue Jan 2 16:46:28 1996
+++ linux/fs/ncpfs/mmap.c	Sun Jan 7 18:27:06 1996
@@ -31,15 +31,19 @@
  */
 static unsigned long ncp_file_mmap_nopage(struct vm_area_struct * area,
-	unsigned long address, unsigned long page, int no_share)
+	unsigned long address, int no_share)
 {
 	struct inode * inode = area->vm_inode;
+	unsigned long page;
 	unsigned int clear;
 	unsigned long tmp;
 	int bufsize;
 	int pos;
 	unsigned short fs;
 
+	page = __get_free_page(GFP_KERNEL);
+	if (!page)
+		return page;
 	address &= PAGE_MASK;
 	pos = address - area->vm_start + area->vm_offset;
 
diff -u --recursive --new-file v1.3.55/linux/fs/nfs/mmap.c linux/fs/nfs/mmap.c
--- v1.3.55/linux/fs/nfs/mmap.c	Thu Nov 9 11:23:51 1995
+++ linux/fs/nfs/mmap.c	Sun Jan 7 18:31:51 1996
@@ -25,12 +25,14 @@
 #include <asm/system.h>
 
 /*
- * Fill in the supplied page for mmap
+ * Return a page for mmap. We need to start using the page cache,
+ * because otherwise we can't share pages between processes..
  */
 static unsigned long nfs_file_mmap_nopage(struct vm_area_struct * area,
-	unsigned long address, unsigned long page, int no_share)
+	unsigned long address, int no_share)
 {
 	struct inode * inode = area->vm_inode;
+	unsigned long page;
 	unsigned int clear;
 	unsigned long tmp;
 	int n;
@@ -38,6 +40,9 @@
 	int pos;
 	struct nfs_fattr fattr;
 
+	page = __get_free_page(GFP_KERNEL);
+	if (!page)
+		return page;
 	address &= PAGE_MASK;
 	pos = address - area->vm_start + area->vm_offset;
 
diff -u --recursive --new-file v1.3.55/linux/fs/smbfs/mmap.c linux/fs/smbfs/mmap.c
--- v1.3.55/linux/fs/smbfs/mmap.c	Sat Nov 11 17:41:34 1995
+++ linux/fs/smbfs/mmap.c	Sun Jan 7 18:27:06 1996
@@ -25,15 +25,19 @@
  */
 static unsigned long smb_file_mmap_nopage(struct vm_area_struct * area,
-	unsigned long address, unsigned long page, int no_share)
+	unsigned long address, int no_share)
 {
 	struct inode * inode = area->vm_inode;
+	unsigned long page;
 	unsigned int clear;
 	unsigned long tmp;
 	int n;
 	int i;
 	int pos;
 
+	page = __get_free_page(GFP_KERNEL);
+	if (!page)
+		return 0;
 	address &= PAGE_MASK;
 	pos = address - area->vm_start + area->vm_offset;
 
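Aside (illustration, not part of the patch): all of these filesystems now follow the same ->nopage() contract — the method allocates its own page, fills it, and returns 0 on failure, which the fault handler in mm/memory.c (further down) turns into a SIGBUS. A minimal conforming skeleton; the fill helper is made up:

	static unsigned long example_nopage(struct vm_area_struct * area,
		unsigned long address, int no_share)
	{
		unsigned long page = __get_free_page(GFP_KERNEL);

		if (!page)
			return 0;			/* caller raises SIGBUS */
		address &= PAGE_MASK;
		example_read_into(area, address, page);	/* hypothetical helper */
		return page;
	}
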
diff -u --recursive --new-file v1.3.55/linux/include/asm-alpha/mmu_context.h linux/include/asm-alpha/mmu_context.h
--- v1.3.55/linux/include/asm-alpha/mmu_context.h	Sat Jan 6 19:10:40 1996
+++ linux/include/asm-alpha/mmu_context.h	Mon Jan 8 09:49:44 1996
@@ -7,16 +7,38 @@
  * Copyright (C) 1996, Linus Torvalds
  */
 
-#include <asm/pgtable.h>
+#include <linux/config.h>
+#include <asm/system.h>
 
 /*
- * The maximum ASN's the processor supports. On the EV4 this doesn't
- * matter as the pal-code doesn't use the ASNs anyway, on the EV5
+ * The maximum ASN's the processor supports. On the EV4 this is 63
+ * but the PAL-code doesn't actually use this information. On the
  * EV5 this is 127.
+ *
+ * On the EV4, the ASNs are more-or-less useless anyway, as they are
+ * only used as a icache tag, not for TB entries. On the EV5 ASN's
+ * also validate the TB entries, and thus make a lot more sense.
+ *
+ * The EV4 ASN's don't even match the architecture manual, ugh. And
+ * I quote: "If a processor implements address space numbers (ASNs),
+ * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
+ * in use) and the Valid bit set, then entries can also effectively be
+ * made coherent by assigning a new, unused ASN to the currently
+ * running process and not reusing the previous ASN before calling the
+ * appropriate PALcode routine to invalidate the translation buffer
+ * (TB)".
+ *
+ * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
+ * work correctly and can thus not be used (explaining the lack of PAL-code
+ * support).
  */
+#ifdef CONFIG_EV5
 #define MAX_ASN 127
+#else
+#define MAX_ASN 63
+#endif
 
-#define ASN_VERSION_SHIFT 32
+#define ASN_VERSION_SHIFT 16
 #define ASN_VERSION_MASK ((~0UL) << ASN_VERSION_SHIFT)
 #define ASN_FIRST_VERSION (1UL << ASN_VERSION_SHIFT)
 
@@ -33,6 +55,7 @@
  */
 extern inline void get_mmu_context(struct task_struct *p)
 {
+#ifdef CONFIG_EV5
 	static unsigned long asn_cache = ASN_FIRST_VERSION;
 	struct mm_struct * mm = p->mm;
 	unsigned long asn = mm->context;
@@ -44,7 +67,7 @@
 		/* check if it's legal.. */
 		if ((asn & ~ASN_VERSION_MASK) > MAX_ASN) {
 			/* start a new version, invalidate all old asn's */
-			tbiap();
+			tbiap(); imb();
 			asn_cache = (asn_cache & ASN_VERSION_MASK) + ASN_FIRST_VERSION;
 			if (!asn_cache)
 				asn_cache = ASN_FIRST_VERSION;
@@ -53,6 +76,7 @@
 		mm->context = asn;			/* full version + asn */
 		p->tss.asn = asn & ~ASN_VERSION_MASK;	/* just asn */
 	}
+#endif
 }
 
 #endif
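
Aside (illustration, not part of the patch): mm->context packs a generation ("version") count above the hardware ASN — with ASN_VERSION_SHIFT now 16, the low 16 bits are the ASN itself (what ends up in p->tss.asn) and everything above is the version. When one generation's 0..MAX_ASN range runs out, the version is bumped and tbiap()/imb() invalidate the TB and, presumably, the ASN-tagged icache. The helpers below are a sketch of the packing, not kernel functions:

	static inline unsigned long context_asn(unsigned long context)
	{
		return context & ~ASN_VERSION_MASK;	/* low ASN_VERSION_SHIFT bits */
	}

	static inline int context_is_stale(unsigned long context, unsigned long asn_cache)
	{
		/* different generation => the saved ASN may have been reused */
		return (context & ASN_VERSION_MASK) != (asn_cache & ASN_VERSION_MASK);
	}
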
diff -u --recursive --new-file v1.3.55/linux/include/asm-alpha/page.h linux/include/asm-alpha/page.h
--- v1.3.55/linux/include/asm-alpha/page.h	Mon Dec 11 15:42:04 1995
+++ linux/include/asm-alpha/page.h	Mon Jan 8 08:14:54 1996
@@ -51,7 +51,7 @@
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
-#define PAGE_OFFSET	0xFFFFFC0000000000
+#define PAGE_OFFSET	0xFFFFFC0000000000UL
 #define MAP_NR(addr)	((((unsigned long) (addr)) - PAGE_OFFSET) >> PAGE_SHIFT)
 
 #endif /* __KERNEL__ */
diff -u --recursive --new-file v1.3.55/linux/include/asm-alpha/pgtable.h linux/include/asm-alpha/pgtable.h
--- v1.3.55/linux/include/asm-alpha/pgtable.h	Sat Jan 6 19:10:40 1996
+++ linux/include/asm-alpha/pgtable.h	Mon Jan 8 08:14:54 1996
@@ -9,13 +9,7 @@
  * in (currently 8192).
  */
 
-extern void tbi(long type, ...);
-
-#define tbisi(x)	tbi(1,(x))
-#define tbisd(x)	tbi(2,(x))
-#define tbis(x)		tbi(3,(x))
-#define tbiap()		tbi(-1)
-#define tbia()		tbi(-2)
+#include <asm/system.h>
 
 /*
  * Invalidate current user mapping.
diff -u --recursive --new-file v1.3.55/linux/include/asm-alpha/system.h linux/include/asm-alpha/system.h
--- v1.3.55/linux/include/asm-alpha/system.h	Sat Nov 25 19:04:48 1995
+++ linux/include/asm-alpha/system.h	Mon Jan 8 13:09:54 1996
@@ -59,6 +59,8 @@
 	alpha_switch_to((unsigned long) &(p)->tss - 0xfffffc0000000000); \
 } while (0)
 
+extern void imb(void);
+
 #define mb() \
 __asm__ __volatile__("mb": : :"memory")
 
@@ -96,6 +98,17 @@
 #define sti()			setipl(0)
 #define save_flags(flags)	do { flags = getipl(); } while (0)
 #define restore_flags(flags)	setipl(flags)
+
+/*
+ * TB routines..
+ */
+extern void tbi(long type, ...);
+
+#define tbisi(x)	tbi(1,(x))
+#define tbisd(x)	tbi(2,(x))
+#define tbis(x)		tbi(3,(x))
+#define tbiap()		tbi(-1)
+#define tbia()		tbi(-2)
 
 /*
  * Give prototypes to shut up gcc.
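
Aside (illustration, not part of the patch): moving the tbi() wrappers next to imb() in asm/system.h puts the two halves of a coherency operation in one place — the tbi type selectors are visible in the macros themselves (1 = I-stream, 2 = D-stream, 3 = both, -1 = all non-ASM entries, -2 = everything). A caller that had just rewritten code in a page might combine them along these lines; the helper is hypothetical:

	static inline void flush_code_page(unsigned long addr)	/* hypothetical */
	{
		tbis(addr);	/* tbi type 3: drop I- and D-stream TB entries */
		imb();		/* resync the instruction stream with memory */
	}
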
diff -u --recursive --new-file v1.3.55/linux/include/linux/mm.h linux/include/linux/mm.h
--- v1.3.55/linux/include/linux/mm.h	Thu Jan 4 21:54:59 1996
+++ linux/include/linux/mm.h	Sun Jan 7 18:27:06 1996
@@ -98,8 +98,7 @@
 	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
 	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
 	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
-	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
-		unsigned long page, int write_access);
+	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
 	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
 		unsigned long page);
 	int (*swapout)(struct vm_area_struct *,  unsigned long, pte_t *);
diff -u --recursive --new-file v1.3.55/linux/include/linux/pagemap.h linux/include/linux/pagemap.h
--- v1.3.55/linux/include/linux/pagemap.h	Sat Jan 6 19:10:40 1996
+++ linux/include/linux/pagemap.h	Mon Jan 8 08:14:54 1996
@@ -39,6 +39,16 @@
 
 #define page_hash(inode,offset) page_hash_table[_page_hashfn(inode,offset)]
 
+static inline int page_age_update(struct page * page, int accessed)
+{
+	unsigned int age = page->age;
+	if (accessed)
+		age |= PAGE_AGE_VALUE << 1;
+	age >>= 1;
+	page->age = age;
+	return age > (PAGE_AGE_VALUE >> 1);
+}
+
 static inline struct page * find_page(struct inode * inode, unsigned long offset)
 {
 	struct page *page;
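
Aside (illustration, not part of the patch): page_age_update() keeps a decaying access history in page->age — every scan halves the age, and a scan that saw a reference first ORs in PAGE_AGE_VALUE << 1, so the result is "young" only while the last reference is at most about one idle scan old. Assuming PAGE_AGE_VALUE is 16 purely for illustration (threshold 16 >> 1 = 8):

	static void age_demo(struct page * page)	/* illustration only */
	{
		page->age = 0;
		page_age_update(page, 1);	/* (0|32)>>1  = 16: young (16 > 8) */
		page_age_update(page, 1);	/* (16|32)>>1 = 24: young (24 > 8) */
		page_age_update(page, 0);	/* 24>>1      = 12: young (12 > 8) */
		page_age_update(page, 0);	/* 12>>1      =  6: old   (6 <= 8) */
	}
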
"page_cache" is a potentially free page - * that we could use for the cache (if it is 0 we can try to create one, - * this is all overlapped with the IO on the previous page finishing anyway) - */ -static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache) -{ - if (!page_cache) - page_cache = __get_free_page(GFP_KERNEL); - offset = (offset + PAGE_SIZE) & PAGE_MASK; - /* - * read-ahead is not implemented yet, but this is - * where we should start.. + /* + * And start IO on it.. + * (this should be asynchronous, but currently isn't) */ + inode->i_op->readpage(inode, page); + free_page(page_cache); + return 0; +#else return page_cache; +#endif } /* @@ -294,41 +290,63 @@ } /* + * Find a cached page and wait for it to become up-to-date, return + * the page address. + */ +static inline unsigned long fill_page(struct inode * inode, unsigned long offset) +{ + struct page * page; + unsigned long new_page; + + page = find_page(inode, offset); + if (page) + goto found_page; + new_page = __get_free_page(GFP_KERNEL); + page = find_page(inode, offset); + if (page) { + if (new_page) + free_page(new_page); + goto found_page; + } + if (!new_page) + return 0; + page = mem_map + MAP_NR(new_page); + new_page = 0; + page->count++; + page->uptodate = 0; + page->error = 0; + page->offset = offset; + add_page_to_inode_queue(inode, page); + add_page_to_hash_queue(inode, page); + inode->i_op->readpage(inode, page); +found_page: + if (!page->uptodate) + sleep_on(&page->wait); + return page_address(page); +} + +/* * Semantics for shared and private memory areas are different past the end * of the file. A shared mapping past the last page of the file is an error * and results in a SIBGUS, while a private mapping just maps in a zero page. 
diff -u --recursive --new-file v1.3.55/linux/mm/memory.c linux/mm/memory.c
--- v1.3.55/linux/mm/memory.c	Thu Jan 4 21:54:59 1996
+++ linux/mm/memory.c	Sun Jan 7 18:27:06 1996
@@ -905,21 +905,19 @@
 		get_empty_page(tsk, vma, page_table);
 		return;
 	}
-	page = __get_free_page(GFP_KERNEL);
-	if (!page) {
-		oom(tsk);
-		put_page(page_table, BAD_PAGE);
-		return;
-	}
 	++tsk->maj_flt;
 	++vma->vm_mm->rss;
 	/*
-	 * The fourth argument is "no_share", which tells the low-level code
+	 * The third argument is "no_share", which tells the low-level code
 	 * to copy, not share the page even if sharing is possible. It's
 	 * essentially an early COW detection
 	 */
-	page = vma->vm_ops->nopage(vma, address, page,
-		write_access && !(vma->vm_flags & VM_SHARED));
+	page = vma->vm_ops->nopage(vma, address, write_access && !(vma->vm_flags & VM_SHARED));
+	if (!page) {
+		send_sig(SIGBUS, current, 1);
+		put_page(page_table, BAD_PAGE);
+		return;
+	}
 	/*
 	 * This silly early PAGE_DIRTY setting removes a race
 	 * due to the bad i386 page protection. But it's valid
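
Aside (illustration, not part of the patch): the failure handling has changed sides — do_no_page() no longer pre-allocates a page, and a 0 return from ->nopage() (allocation failure, or filemap_nopage()'s shared-mapping-past-EOF case above) is what now produces the SIGBUS. The remaining argument is the early-COW decision, spelled out here as a hypothetical helper:

	static inline int fault_needs_private_copy(struct vm_area_struct * vma,
		int write_access)
	{
		/* a write fault on a non-shared mapping must get its own copy */
		return write_access && !(vma->vm_flags & VM_SHARED);
	}
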
diff -u --recursive --new-file v1.3.55/linux/mm/swap.c linux/mm/swap.c
--- v1.3.55/linux/mm/swap.c	Thu Jan 4 21:54:59 1996
+++ linux/mm/swap.c	Mon Jan 8 08:14:54 1996
@@ -22,6 +22,7 @@
 #include <linux/swap.h>
 #include <linux/fs.h>
 #include <linux/swapctl.h>
+#include <linux/pagemap.h>
 
 #include <asm/dma.h>
 #include <asm/system.h> /* for cli()/sti() */
@@ -420,17 +421,19 @@
 	pte_t pte;
 	unsigned long entry;
 	unsigned long page;
+	struct page * page_map;
 
 	pte = *page_table;
 	if (!pte_present(pte))
 		return 0;
 	page = pte_page(pte);
-	if (page >= high_memory)
+	if (MAP_NR(page) >= MAP_NR(high_memory))
 		return 0;
 	if (page >= limit)
 		return 0;
-	if (mem_map[MAP_NR(page)].reserved)
+	page_map = mem_map + MAP_NR(page);
+	if (page_map->reserved)
 		return 0;
 	/* Deal with page aging.  Pages age from being unused; they
 	 * rejuvinate on being accessed.  Only swap old pages (age==0
@@ -438,11 +441,10 @@
 	if ((pte_dirty(pte) && delete_from_swap_cache(page))
 	    || pte_young(pte)) {
 		set_pte(page_table, pte_mkold(pte));
-		touch_page(page);
+		page_age_update(page_map, 1);
 		return 0;
 	}
-	age_page(page);
-	if (age_of(page))
+	if (page_age_update(page_map, pte_young(pte)))
 		return 0;
 	if (pte_dirty(pte)) {
@@ -451,7 +453,7 @@
 			if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
 				kill_proc(pid, SIGBUS, 1);
 		} else {
-			if (mem_map[MAP_NR(page)].count != 1)
+			if (page_map->count != 1)
 				return 0;
 			if (!(entry = get_swap_page()))
 				return 0;
@@ -465,7 +467,7 @@
 		return 1;	/* we slept: the process may not exist any more */
 	}
 	if ((entry = find_in_swap_cache(page)))  {
-		if (mem_map[MAP_NR(page)].count != 1) {
+		if (page_map->count != 1) {
 			set_pte(page_table, pte_mkdirty(pte));
 			printk("Aiee.. duplicated cached swap-cache entry\n");
 			return 0;
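
Aside (illustration, not part of the patch): with touch_page()/age_page()/age_of() folded into the shared page_age_update() helper, try_to_swap_out()'s aging logic condenses to "rejuvenate referenced pages, decay the rest, and only swap once the age runs out" — roughly:

	/* condensed decision logic; hypothetical helper, not kernel code */
	static int should_swap_out(struct page * page_map, pte_t pte)
	{
		if (pte_young(pte)) {
			page_age_update(page_map, 1);	/* rejuvenate.. */
			return 0;			/* ..and keep the page */
		}
		return !page_age_update(page_map, 0);	/* swap once it has gone cold */
	}
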