## Automatically generated incremental diff
## From: linux-2.4.21-bk13
## To: linux-2.4.21-bk14
## Robot: $Id: make-incremental-diff,v 1.11 2002/02/20 02:59:33 hpa Exp $
diff -urN linux-2.4.21-bk13/Makefile linux-2.4.21-bk14/Makefile
--- linux-2.4.21-bk13/Makefile	2003-07-19 02:53:45.000000000 -0700
+++ linux-2.4.21-bk14/Makefile	2003-07-19 02:53:54.000000000 -0700
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 21
-EXTRAVERSION = -bk13
+EXTRAVERSION = -bk14
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff -urN linux-2.4.21-bk13/drivers/ide/pci/alim15x3.c linux-2.4.21-bk14/drivers/ide/pci/alim15x3.c
--- linux-2.4.21-bk13/drivers/ide/pci/alim15x3.c	2003-06-13 07:51:33.000000000 -0700
+++ linux-2.4.21-bk14/drivers/ide/pci/alim15x3.c	2003-07-19 02:53:55.000000000 -0700
@@ -525,10 +525,14 @@
 
 	drive->init_speed = 0;
 
+	/* Set reasonable PIO timings first - some of them are needed
+	   for DMA as well. */
+	hwif->tuneproc(drive, 255);
+
 	if ((id->capability & 1) != 0 && drive->autodma) {
 		/* Consult the list of known "bad" drives */
 		if (hwif->ide_dma_bad_drive(drive))
-			goto ata_pio;
+			goto no_dma_set;
 		if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
 			if (id->dma_ultra & hwif->ultra_mask) {
 				/* Force if Capable UltraDMA */
@@ -550,11 +554,9 @@
 			if (!config_chipset_for_dma(drive))
 				goto no_dma_set;
 		} else {
-			goto ata_pio;
+			goto no_dma_set;
 		}
 	} else {
-ata_pio:
-		hwif->tuneproc(drive, 255);
 no_dma_set:
 		return hwif->ide_dma_off_quietly(drive);
 	}
diff -urN linux-2.4.21-bk13/fs/buffer.c linux-2.4.21-bk14/fs/buffer.c
--- linux-2.4.21-bk13/fs/buffer.c	2003-07-19 02:53:50.000000000 -0700
+++ linux-2.4.21-bk14/fs/buffer.c	2003-07-19 02:53:56.000000000 -0700
@@ -487,7 +487,6 @@
 	ret = do_fdatasync(file);
 	up(&inode->i_sem);
 
-out_putf:
 	fput(file);
 out:
 	return ret;
diff -urN linux-2.4.21-bk13/include/linux/vmalloc.h linux-2.4.21-bk14/include/linux/vmalloc.h
--- linux-2.4.21-bk13/include/linux/vmalloc.h	2002-11-28 15:53:15.000000000 -0800
+++ linux-2.4.21-bk14/include/linux/vmalloc.h	2003-07-19 02:53:57.000000000 -0700
@@ -21,6 +21,9 @@
 
 extern struct vm_struct * get_vm_area (unsigned long size, unsigned long flags);
 extern void vfree(void * addr);
+#define vunmap(addr)	vfree(addr)
+extern void * vmap(struct page **pages, int count,
+		   unsigned long flags, pgprot_t prot);
 extern void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot);
 extern long vread(char *buf, char *addr, unsigned long count);
 extern void vmfree_area_pages(unsigned long address, unsigned long size);
diff -urN linux-2.4.21-bk13/kernel/ksyms.c linux-2.4.21-bk14/kernel/ksyms.c
--- linux-2.4.21-bk13/kernel/ksyms.c	2003-07-19 02:53:50.000000000 -0700
+++ linux-2.4.21-bk14/kernel/ksyms.c	2003-07-19 02:53:57.000000000 -0700
@@ -112,6 +112,7 @@
 EXPORT_SYMBOL(kfree);
 EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(vmap);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(remap_page_range);
diff -urN linux-2.4.21-bk13/kernel/sched.c linux-2.4.21-bk14/kernel/sched.c
--- linux-2.4.21-bk13/kernel/sched.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21-bk14/kernel/sched.c	2003-07-19 02:53:57.000000000 -0700
@@ -282,7 +282,7 @@
 				target_tsk = tsk;
 			}
 		} else {
-			if (oldest_idle == -1ULL) {
+			if (oldest_idle == (cycles_t)-1) {
 				int prio = preemption_goodness(tsk, p, cpu);
 
 				if (prio > max_prio) {
@@ -294,7 +294,7 @@
 	}
 	tsk = target_tsk;
 	if (tsk) {
-		if (oldest_idle != -1ULL) {
+		if (oldest_idle != (cycles_t)-1) {
 			best_cpu = tsk->processor;
 			goto send_now_idle;
 		}
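## Usage note (not part of the generated diff): the vmalloc.h and
## ksyms.c hunks above introduce and export a new vmap() interface,
## with vunmap() as an alias for vfree(). A minimal, hypothetical
## caller might look like the sketch below; map_my_pages() and
## unmap_my_pages() are illustrative names only, and VM_ALLOC with
## PAGE_KERNEL is one plausible flags/protection combination, not
## something this patch mandates.

	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Map a caller-owned page array into one contiguous
	   kernel virtual range using the new vmap(). */
	static void *map_my_pages(struct page **pages, int count)
	{
		return vmap(pages, count, VM_ALLOC, PAGE_KERNEL);
	}

	static void unmap_my_pages(void *addr)
	{
		/* vunmap() expands to vfree(); it drops the extra
		   page references taken by vmap() without freeing
		   pages the caller still owns. */
		vunmap(addr);
	}
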
diff -urN linux-2.4.21-bk13/mm/vmalloc.c linux-2.4.21-bk14/mm/vmalloc.c
--- linux-2.4.21-bk13/mm/vmalloc.c	2003-06-13 07:51:39.000000000 -0700
+++ linux-2.4.21-bk14/mm/vmalloc.c	2003-07-19 02:53:57.000000000 -0700
@@ -93,7 +93,8 @@
 }
 
 static inline int alloc_area_pte (pte_t * pte, unsigned long address,
-			unsigned long size, int gfp_mask, pgprot_t prot)
+			unsigned long size, int gfp_mask,
+			pgprot_t prot, struct page ***pages)
 {
 	unsigned long end;
 
@@ -103,9 +104,20 @@
 		end = PMD_SIZE;
 	do {
 		struct page * page;
-		spin_unlock(&init_mm.page_table_lock);
-		page = alloc_page(gfp_mask);
-		spin_lock(&init_mm.page_table_lock);
+
+		if (!pages) {
+			spin_unlock(&init_mm.page_table_lock);
+			page = alloc_page(gfp_mask);
+			spin_lock(&init_mm.page_table_lock);
+		} else {
+			page = (**pages);
+			(*pages)++;
+
+			/* Add a reference to the page so we can free later */
+			if (page)
+				atomic_inc(&page->count);
+
+		}
 		if (!pte_none(*pte))
 			printk(KERN_ERR "alloc_area_pte: page already exists\n");
 		if (!page)
@@ -117,7 +129,9 @@
 	return 0;
 }
 
-static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
+static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
+				 unsigned long size, int gfp_mask,
+				 pgprot_t prot, struct page ***pages)
 {
 	unsigned long end;
 
@@ -129,7 +143,8 @@
 		pte_t * pte = pte_alloc(&init_mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
-		if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
+		if (alloc_area_pte(pte, address, end - address,
+				   gfp_mask, prot, pages))
 			return -ENOMEM;
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
@@ -137,8 +152,11 @@
 	return 0;
 }
 
-inline int vmalloc_area_pages (unsigned long address, unsigned long size,
-                               int gfp_mask, pgprot_t prot)
+static inline int __vmalloc_area_pages (unsigned long address,
+					unsigned long size,
+					int gfp_mask,
+					pgprot_t prot,
+					struct page ***pages)
 {
 	pgd_t * dir;
 	unsigned long end = address + size;
@@ -155,7 +173,7 @@
 			break;
 
 		ret = -ENOMEM;
-		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
+		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
 			break;
 
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -168,6 +186,12 @@
 	return ret;
 }
 
+int vmalloc_area_pages(unsigned long address, unsigned long size,
+		       int gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
+}
+
 struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 {
 	unsigned long addr, next;
@@ -246,7 +270,30 @@
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
+	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
+				 prot, NULL)) {
+		vfree(addr);
+		return NULL;
+	}
+	return addr;
+}
+
+void * vmap(struct page **pages, int count,
+	    unsigned long flags, pgprot_t prot)
+{
+	void * addr;
+	struct vm_struct *area;
+	unsigned long size = count << PAGE_SHIFT;
+
+	if (!size || size > (max_mapnr << PAGE_SHIFT))
+		return NULL;
+	area = get_vm_area(size, flags);
+	if (!area) {
+		return NULL;
+	}
+	addr = area->addr;
+	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
+				 prot, &pages)) {
 		vfree(addr);
 		return NULL;
 	}
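## Design note (not part of the generated diff): vmap() reuses the
## existing vmalloc() page-table walk by threading the caller's page
## array through __vmalloc_area_pages() as a struct page ***. When
## an array is supplied, alloc_area_pte() installs the caller's pages
## and takes an extra reference on each (atomic_inc(&page->count))
## instead of allocating fresh ones; that extra reference is what
## lets vunmap()/vfree() tear the mapping down later without freeing
## pages the caller still owns.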