diff -urN 2.2.10/arch/i386/mm/init.c 2.2.10-bigmem/arch/i386/mm/init.c
--- 2.2.10/arch/i386/mm/init.c	Fri Aug  6 17:07:50 1999
+++ 2.2.10-bigmem/arch/i386/mm/init.c	Fri Aug  6 15:46:00 1999
@@ -501,7 +501,7 @@
 		clear_bit(PG_reserved, &mem_map[PHYSMAP_NR(tmp)].flags);
 		set_bit(PG_BIGMEM, &mem_map[PHYSMAP_NR(tmp)].flags);
 		atomic_set(&mem_map[PHYSMAP_NR(tmp)].count, 1);
-		free_physpage(tmp);
+		free_page(tmp + PAGE_OFFSET);
 		bigpages++;
 	}
 #endif
diff -urN 2.2.10/include/linux/mm.h 2.2.10-bigmem/include/linux/mm.h
--- 2.2.10/include/linux/mm.h	Fri Aug  6 17:07:50 1999
+++ 2.2.10-bigmem/include/linux/mm.h	Fri Aug  6 15:43:51 1999
@@ -282,11 +282,6 @@
 #define free_page(addr) free_pages((addr),0)
 extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
 extern void FASTCALL(__free_page(struct page *));
-#ifdef CONFIG_BIGMEM
-#define free_physpage(addr) free_physpages((addr),0)
-extern void FASTCALL(free_physpages(unsigned long addr, unsigned long order));
-#endif
-
 extern void show_free_areas(void);
 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
 	unsigned long address);
diff -urN 2.2.10/mm/page_alloc.c 2.2.10-bigmem/mm/page_alloc.c
--- 2.2.10/mm/page_alloc.c	Fri Aug  6 17:07:50 1999
+++ 2.2.10-bigmem/mm/page_alloc.c	Fri Aug  6 16:54:08 1999
@@ -145,25 +145,6 @@
 	}
 }
 
-#ifdef CONFIG_BIGMEM
-void free_physpages(unsigned long addr, unsigned long order)
-{
-	unsigned long map_nr = PHYSMAP_NR(addr);
-
-	if (map_nr < max_mapnr) {
-		mem_map_t * map = mem_map + map_nr;
-		if (PageReserved(map))
-			return;
-		if (atomic_dec_and_test(&map->count)) {
-			if (PageSwapCache(map))
-				panic ("Freeing swap cache pages");
-			map->flags &= ~(1 << PG_referenced);
-			free_pages_ok(map_nr, order);
-			return;
-		}
-	}
-}
-#endif
 
 void free_pages(unsigned long addr, unsigned long order)
 {
@@ -190,47 +171,29 @@
 		change_bit((index) >> (1+(order)), (area)->map)
 #define CAN_DMA(x) (PageDMA(x))
 #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
+
 #ifdef CONFIG_BIGMEM
-#define RMQUEUE(order, gfp_mask) \
-do { struct free_area_struct * area = free_area+BIGMEM_LISTS_OFFSET+order; \
-	unsigned long new_order = order; \
-	if (gfp_mask & __GFP_BIGMEM) { \
-		do { struct page *prev = memory_head(area), *ret = prev->next; \
-			if (memory_head(area) != ret) { \
-				unsigned long map_nr; \
-				(prev->next = ret->next)->prev = prev; \
-				map_nr = ret - mem_map; \
-				MARK_USED(map_nr, new_order, area); \
-				nr_free_pages -= 1 << order; \
-				nr_free_bigmem -= 1 << order; \
-				EXPAND(ret, map_nr, order, new_order, area); \
-				spin_unlock_irqrestore(&page_alloc_lock, flags); \
-				return ADDRESS(map_nr); \
-			} \
-			new_order++; area++; \
-		} while (new_order < NR_MEM_LISTS); \
-		new_order = order; \
-	} \
-	area = free_area+order; \
+#define RMQUEUEBIG(order, gfp_mask) \
+if (gfp_mask & __GFP_BIGMEM) { \
+	struct free_area_struct * area = free_area+order+BIGMEM_LISTS_OFFSET; \
+	unsigned long new_order = order; \
 	do { struct page *prev = memory_head(area), *ret = prev->next; \
-		while (memory_head(area) != ret) { \
-			if (!(gfp_mask & __GFP_DMA) || CAN_DMA(ret)) { \
-				unsigned long map_nr; \
-				(prev->next = ret->next)->prev = prev; \
-				map_nr = ret - mem_map; \
-				MARK_USED(map_nr, new_order, area); \
-				nr_free_pages -= 1 << order; \
-				EXPAND(ret, map_nr, order, new_order, area); \
-				spin_unlock_irqrestore(&page_alloc_lock, flags); \
-				return ADDRESS(map_nr); \
-			} \
-			prev = ret; \
-			ret = ret->next; \
+		if (memory_head(area) != ret) { \
+			unsigned long map_nr; \
+			(prev->next = ret->next)->prev = prev; \
+			map_nr = ret - mem_map; \
+			MARK_USED(map_nr, new_order, area); \
+			nr_free_pages -= 1 << order; \
+			nr_free_bigmem -= 1 << order; \
+			EXPAND(ret, map_nr, order, new_order, area); \
+			spin_unlock_irqrestore(&page_alloc_lock, flags); \
+			return ADDRESS(map_nr); \
 		} \
 		new_order++; area++; \
 	} while (new_order < NR_MEM_LISTS); \
-} while (0)
-#else
+}
+#endif
+
 #define RMQUEUE(order, gfp_mask) \
 do { struct free_area_struct * area = free_area+order; \
 	unsigned long new_order = order; \
@@ -252,7 +215,7 @@
 		new_order++; area++; \
 	} while (new_order < NR_MEM_LISTS); \
 } while (0)
-#endif
+
 #define EXPAND(map,index,low,high,area) \
 do { unsigned long size = 1 << high; \
 	while (high > low) { \
@@ -312,6 +275,9 @@
 	}
 ok_to_allocate:
 	spin_lock_irqsave(&page_alloc_lock, flags);
+#ifdef CONFIG_BIGMEM
+	RMQUEUEBIG(order, gfp_mask);
+#endif
 	RMQUEUE(order, gfp_mask);
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
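
For reference, both RMQUEUE and the new RMQUEUEBIG do the same classic buddy
walk: start the search at the requested order, fall back to progressively
larger orders, and let EXPAND split the surplus back onto the lower free
lists; RMQUEUEBIG simply runs that walk over the bigmem lists first, so the
!CONFIG_BIGMEM build compiles the exact stock allocator instead of an
#ifdef'd duplicate. Below is a minimal standalone sketch of that walk --
toy userspace code, not the 2.2.x API; toy_free_area, toy_alloc and
NR_ORDERS are made-up names for illustration only:

/*
 * Toy sketch of the buddy "walk upward through the orders" pattern.
 * toy_free_area[] stands in for the per-order free_area lists; here we
 * only track block counts, not the actual page lists or bitmaps.
 */
#include <stdio.h>

#define NR_ORDERS 10

/* one free order-3 block (8 pages), nothing else */
static int toy_free_area[NR_ORDERS] = { 0, 0, 0, 1 };

/* Try to satisfy an order-'order' request, splitting larger blocks. */
static int toy_alloc(int order)
{
	int cur;

	for (cur = order; cur < NR_ORDERS; cur++) {
		if (toy_free_area[cur] == 0)
			continue;	/* nothing at this order, go bigger */
		toy_free_area[cur]--;
		/* split the block down, freeing one buddy per level --
		 * this is the job EXPAND does in page_alloc.c */
		while (cur > order)
			toy_free_area[--cur]++;
		return 0;		/* success */
	}
	return -1;			/* no block at any order */
}

int main(void)
{
	int i;

	if (toy_alloc(1) == 0)
		printf("order-1 block carved out of the order-3 block\n");
	printf("free counts per order:");
	for (i = 0; i < NR_ORDERS; i++)
		printf(" %d", toy_free_area[i]);
	printf("\n");
	return 0;
}

Running it shows the order-3 block turn into one free order-2 buddy and one
free order-1 buddy, with the remaining order-1 block handed to the caller,
which is the same bookkeeping the EXPAND macro performs on the real lists.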