(1) fix incorrect virtual address calculation in kmap_atomic() (2) more compile fixes for arch/i386/mm/highmem.c (3) checking for alignment of kmap() and kmap_atomic() virtual space in arch/i386/mm/init.c (4) rework fixmap enums yet again so kmap() and kmap_atomic() areas actually come out properly aligned (5) change FIXADDR_TOP to -PAGE_SIZE so the offset calculation isn't as easy to screw up (6) fix mismerge of pgd_ctor() bits that installed garbage pmd's on CONFIG_HIGHMEM64G diff -prauN pgcl-2.5.70-bk5-2/arch/i386/mm/highmem.c pgcl-2.5.70-bk5-3/arch/i386/mm/highmem.c --- pgcl-2.5.70-bk5-2/arch/i386/mm/highmem.c 2003-06-01 23:44:38.000000000 -0700 +++ pgcl-2.5.70-bk5-3/arch/i386/mm/highmem.c 2003-06-01 23:45:12.000000000 -0700 @@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page, enu return page_address(page); idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx*PAGE_SIZE); + vaddr = __fix_to_virt(FIX_KMAP_END) + PAGE_SIZE*idx; WARN_ON(vaddr > __fix_to_virt(FIX_KMAP_BEGIN)); WARN_ON(vaddr < __fix_to_virt(FIX_KMAP_END)); pfn = page_to_pfn(page); @@ -51,7 +51,7 @@ void *kmap_atomic(struct page *page, enu pgd = pgd_offset_k(addr); pmd = pmd_offset(pgd, addr); pte = pte_offset_kernel(pmd, addr); - for (k = 0; k < PAGE_MMUCOUNT, ++k, addr += MMUPAGE_SIZE) { + for (k = 0; k < PAGE_MMUCOUNT; ++k, addr += MMUPAGE_SIZE) { #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(pte[k])); #endif @@ -77,10 +77,10 @@ void kunmap_atomic(void *kvaddr, enum km return; } - lo = __fix_to_virt(FIX_KMAP_END) + PAGE_SIZE * idx; + lo = __fix_to_virt(FIX_KMAP_END) + PAGE_SIZE*idx; hi = lo + PAGE_SIZE; - BUG_ON(vaddr < lo || vaddr > hi);; + BUG_ON(vaddr < lo || vaddr > hi); /* * force other mappings to Oops if they'll try to access diff -prauN pgcl-2.5.70-bk5-2/arch/i386/mm/init.c pgcl-2.5.70-bk5-3/arch/i386/mm/init.c --- pgcl-2.5.70-bk5-2/arch/i386/mm/init.c 2003-05-31 04:59:16.000000000 -0700 +++ pgcl-2.5.70-bk5-3/arch/i386/mm/init.c 2003-06-01 
23:45:12.000000000 -0700 @@ -508,6 +508,24 @@ void __init mem_init(void) printk("fixaddr: start = 0x%lx, end = 0x%lx\n", FIXADDR_START, FIXADDR_TOP); +#ifdef CONFIG_HIGHMEM + printk("FIX_KMAP_END == %lx\n", __fix_to_virt(FIX_KMAP_END)); + if (__fix_to_virt(FIX_KMAP_END) % PAGE_SIZE) + printk(KERN_CRIT "kmap_atomic() area misaligned!\n"); + + printk("FIX_KMAP_BEGIN == %lx\n", __fix_to_virt(FIX_KMAP_BEGIN)); + if ((__fix_to_virt(FIX_KMAP_BEGIN) + MMUPAGE_SIZE) % PAGE_SIZE) + printk(KERN_CRIT "kmap_atomic() area misaligned!\n"); + + printk("FIX_PKMAP_END == %lx\n", __fix_to_virt(FIX_PKMAP_END)); + if (__fix_to_virt(FIX_PKMAP_END) % PAGE_SIZE) + printk(KERN_CRIT "kmap() area misaligned!\n"); + + printk("FIX_PKMAP_BEGIN == %lx\n", __fix_to_virt(FIX_PKMAP_BEGIN)); + if ((__fix_to_virt(FIX_PKMAP_BEGIN) + MMUPAGE_SIZE) % PAGE_SIZE) + printk(KERN_CRIT "kmap() area misaligned!\n"); +#endif + #ifdef CONFIG_X86_PAE if (!cpu_has_pae) panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); diff -prauN pgcl-2.5.70-bk5-2/arch/i386/mm/pgtable.c pgcl-2.5.70-bk5-3/arch/i386/mm/pgtable.c --- pgcl-2.5.70-bk5-2/arch/i386/mm/pgtable.c 2003-06-01 23:44:38.000000000 -0700 +++ pgcl-2.5.70-bk5-3/arch/i386/mm/pgtable.c 2003-06-01 23:50:38.000000000 -0700 @@ -240,7 +240,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); if (!pmd) goto out_oom; - set_pgd(&pgd[i], __pgd(1 + __pa((u64)(u32)__va(pgd_val(pgd[i])-1)))); + else + set_pgd(&pgd[i], __pgd(1 + __pa(pmd))); } return pgd; diff -prauN pgcl-2.5.70-bk5-2/include/asm-i386/fixmap.h pgcl-2.5.70-bk5-3/include/asm-i386/fixmap.h --- pgcl-2.5.70-bk5-2/include/asm-i386/fixmap.h 2003-05-31 04:59:16.000000000 -0700 +++ pgcl-2.5.70-bk5-3/include/asm-i386/fixmap.h 2003-06-01 23:45:12.000000000 -0700 @@ -28,21 +28,19 @@ * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. 
We allocate these special addresses - * from the end of virtual memory (0xfffff000) backwards. + * from the end of virtual memory (-PAGE_SIZE) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * - * these 'compile-time allocated' memory buffers are - * fixed-size 4k pages. (or larger if used with an increment - * highger than 1) use fixmap_set(idx,phys) to associate - * physical memory with fixmap indices. + * These 'compile-time allocated' memory buffers are + * fixed-size MMUPAGE_SIZE-size pages. Use + * set_fixmap(idx, phys, prot) to associate physical memory with + * fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. - */ - -/* + * * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. @@ -52,14 +50,29 @@ #define LAST_PKMAP 1024 #define LAST_PKMAP_MASK (LAST_PKMAP-1) +/* + * FIXADDR stuff is used by highmem.c for kmapping, and various + * drivers for system devices for their io mappings. + * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap. + * + * leave a hole of exactly PAGE_SIZE at the top for CONFIG_HIGHMEM + * this makes things easier on core code; the math works out funny + * and I didn't care enough to conserve PAGE_SIZE - MMUPAGE_SIZE + * worth of virtualspace. + */ +#define FIXADDR_TOP (-PAGE_SIZE) +#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << MMUPAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) + +#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << MMUPAGE_SHIFT)) +#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & MMUPAGE_MASK)) >> MMUPAGE_SHIFT) + enum fixed_addresses { - /* - * leave a hole of exactly PAGE_SIZE at the top for CONFIG_HIGHMEM - * this makes things easier on core code; the math works out funny - */ - FIX_HOLE = PAGE_MMUCOUNT > 1 ? 
PAGE_MMUCOUNT - 1 : 0, #ifdef CONFIG_HIGHMEM - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_BEGIN = 1, FIX_KMAP_END = FIX_KMAP_BEGIN+((KM_TYPE_NR*NR_CPUS+1)*PAGE_MMUCOUNT)-1, FIX_PKMAP_BEGIN, FIX_PKMAP_END = FIX_PKMAP_BEGIN + (LAST_PKMAP+1)*PAGE_MMUCOUNT - 1, @@ -111,19 +124,6 @@ extern void __set_fixmap (enum fixed_add #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) -/* - * used by vmalloc.c. - * - * Leave one empty page between vmalloc'ed areas and - * the start of the fixmap. - */ -#define FIXADDR_TOP (0xfffff000UL) -#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << MMUPAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) - -#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << MMUPAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x) & MMUPAGE_MASK)) >> MMUPAGE_SHIFT) - extern void __this_fixmap_does_not_exist(void); /*