diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/arch/i386/mm/init.c linux/arch/i386/mm/init.c --- /opt/kernel/linux-2.4.5/arch/i386/mm/init.c Sat Apr 21 01:15:20 2001 +++ linux/arch/i386/mm/init.c Sun May 27 17:50:26 2001 @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -348,12 +349,15 @@ kmap_init(); #endif { - unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; - unsigned int max_dma, high, low; + unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0, 0}; + unsigned int max_dma, max_dma32, high, low, high32; max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; + max_dma32 = PCI_MAX_DMA32 >> PAGE_SHIFT; low = max_low_pfn; - high = highend_pfn; + high32 = high = highend_pfn; + if (high32 > max_dma32) + high32 = max_dma32 + 1; /* first map in HIGHMEM */ if (low < max_dma) zones_size[ZONE_DMA] = low; @@ -361,12 +365,12 @@ zones_size[ZONE_DMA] = max_dma; zones_size[ZONE_NORMAL] = low - max_dma; #ifdef CONFIG_HIGHMEM - zones_size[ZONE_HIGHMEM] = high - low; + zones_size[ZONE_DMA32] = high32 - low; + zones_size[ZONE_HIGHMEM] = high - high32; #endif } free_area_init(zones_size); } - return; } /* diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/include/linux/mm.h linux/include/linux/mm.h --- /opt/kernel/linux-2.4.5/include/linux/mm.h Sat May 26 13:30:50 2001 +++ linux/include/linux/mm.h Mon May 28 02:07:41 2001 @@ -476,8 +476,10 @@ #define __GFP_IO 0x04 #define __GFP_DMA 0x08 #ifdef CONFIG_HIGHMEM -#define __GFP_HIGHMEM 0x10 +#define __GFP_DMA32 0x10 +#define __GFP_HIGHMEM 0x20 #else +#define __GFP_DMA32 0x0 /* noop */ #define __GFP_HIGHMEM 0x0 /* noop */ #endif diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/include/linux/mmzone.h linux/include/linux/mmzone.h --- /opt/kernel/linux-2.4.5/include/linux/mmzone.h Sat May 26 13:30:50 2001 +++ linux/include/linux/mmzone.h Sun May 27 18:26:59 2001 @@ -27,7 +27,8 @@ * * ZONE_DMA < 16 MB ISA DMA capable memory * ZONE_NORMAL 16-896 MB 
direct mapped by the kernel - * ZONE_HIGHMEM > 896 MB only page cache and user processes + * ZONE_DMA32 > 896MB < 4GB For 32-bit DMA + * ZONE_HIGHMEM > 4GB only page cache and user processes */ typedef struct zone_struct { /* @@ -62,8 +63,9 @@ #define ZONE_DMA 0 #define ZONE_NORMAL 1 -#define ZONE_HIGHMEM 2 -#define MAX_NR_ZONES 3 +#define ZONE_DMA32 2 +#define ZONE_HIGHMEM 3 +#define MAX_NR_ZONES 4 /* * One allocation request operates on a zonelist. A zonelist @@ -81,7 +83,7 @@ int gfp_mask; } zonelist_t; -#define NR_GFPINDEX 0x20 +#define NR_GFPINDEX 0x40 /* * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/mm/highmem.c linux/mm/highmem.c --- /opt/kernel/linux-2.4.5/mm/highmem.c Sat May 26 13:30:50 2001 +++ linux/mm/highmem.c Mon May 28 00:04:31 2001 @@ -180,20 +180,19 @@ static inline void copy_from_high_bh (struct buffer_head *to, struct buffer_head *from) { - struct page *p_from; - char *vfrom; + char *vfrom, *vto; unsigned long flags; - p_from = from->b_page; - /* * Since this can be executed from IRQ context, reentrance * on the same CPU must be avoided: */ __save_flags(flags); __cli(); - vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE); - memcpy(to->b_data, vfrom + bh_offset(from), to->b_size); + vfrom = kmap_atomic(from->b_page, KM_BOUNCE_WRITE); + vto = kmap_atomic(to->b_page, KM_BOUNCE_WRITE); + memcpy(vto, vfrom + bh_offset(from), to->b_size); + kunmap_atomic(vto, KM_BOUNCE_WRITE); kunmap_atomic(vfrom, KM_BOUNCE_WRITE); __restore_flags(flags); } @@ -201,15 +200,15 @@ static inline void copy_to_high_bh_irq (struct buffer_head *to, struct buffer_head *from) { - struct page *p_to; - char *vto; + char *vto, *vfrom; unsigned long flags; - p_to = to->b_page; __save_flags(flags); __cli(); - vto = kmap_atomic(p_to, KM_BOUNCE_READ); - memcpy(vto + bh_offset(to), from->b_data, to->b_size); + vto = kmap_atomic(to->b_page, KM_BOUNCE_READ); + vfrom = kmap_atomic(from->b_page, 
KM_BOUNCE_READ); + memcpy(vto + bh_offset(to), vfrom, to->b_size); + kunmap_atomic(vfrom, KM_BOUNCE_READ); kunmap_atomic(vto, KM_BOUNCE_READ); __restore_flags(flags); } @@ -296,13 +295,13 @@ bounce_end_io(bh, uptodate); } -struct page *alloc_bounce_page (void) +struct page *alloc_bounce_page (int gfp_mask) { struct list_head *tmp; struct page *page; repeat_alloc: - page = alloc_page(GFP_BUFFER); + page = alloc_page(gfp_mask); if (page) return page; /* @@ -372,14 +371,12 @@ goto repeat_alloc; } -struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig) +struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig, + int gfp_mask) { struct page *page; struct buffer_head *bh; - if (!PageHighMem(bh_orig->b_page)) - return bh_orig; - bh = alloc_bounce_bh(); /* * This is wasteful for 1k buffers, but this is a stopgap measure @@ -387,7 +384,7 @@ * things immensly. On boxes with more than 4GB RAM this should * not be an issue anyway. */ - page = alloc_bounce_page(); + page = alloc_bounce_page(gfp_mask); set_bh_page(bh, page, 0); diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/mm/page_alloc.c linux/mm/page_alloc.c --- /opt/kernel/linux-2.4.5/mm/page_alloc.c Sat May 26 13:30:50 2001 +++ linux/mm/page_alloc.c Sun May 27 23:47:22 2001 @@ -598,6 +598,7 @@ while (pgdat) { pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; + pages += pgdat->node_zones[ZONE_DMA32].free_pages; pgdat = pgdat->node_next; } return pages; @@ -683,6 +684,8 @@ k = ZONE_NORMAL; if (i & __GFP_HIGHMEM) k = ZONE_HIGHMEM; + if (i & __GFP_DMA32) + k = ZONE_DMA32; if (i & __GFP_DMA) k = ZONE_DMA; @@ -700,6 +703,14 @@ #endif zonelist->zones[j++] = zone; } + case ZONE_DMA32: + zone = pgdat->node_zones + ZONE_DMA32; + if (zone->size) { +#ifndef CONFIG_HIGHMEM + BUG(); +#endif + zonelist->zones[j++] = zone; + } case ZONE_NORMAL: zone = pgdat->node_zones + ZONE_NORMAL; if (zone->size) @@ -833,8 +844,11 @@ for (i = 0; i < size; i++) { struct page *page = mem_map + 
offset + i; page->zone = zone; - if (j != ZONE_HIGHMEM) + if (j != ZONE_HIGHMEM && j != ZONE_DMA32) { page->virtual = __va(zone_start_paddr); + } else + page->virtual = NULL; + zone_start_paddr += PAGE_SIZE; }