diff -ur 2.3.12-lruswap/fs/buffer.c 2.3.12-lru/fs/buffer.c
--- 2.3.12-lruswap/fs/buffer.c	Wed Aug 4 19:17:22 1999
+++ 2.3.12-lru/fs/buffer.c	Wed Aug 4 19:24:08 1999
@@ -1953,7 +1953,7 @@
 	page_map = mem_map + MAP_NR(page);
 	page_map->buffers = bh;
-	lru_cache_add(page_map, 0);
+	lru_cache_add(page_map);
 	atomic_add(PAGE_SIZE, &buffermem);
 	return 1;
 }
 
diff -ur 2.3.12-lruswap/include/linux/swap.h 2.3.12-lru/include/linux/swap.h
--- 2.3.12-lruswap/include/linux/swap.h	Wed Aug 4 19:17:22 1999
+++ 2.3.12-lru/include/linux/swap.h	Wed Aug 4 19:15:26 1999
@@ -65,7 +65,7 @@
 extern int nr_swap_pages;
 extern int nr_free_pages;
 extern int nr_lru_pages;
-extern struct list_head lru_cache, lru_swap_cache;
+extern struct list_head lru_cache;
 extern atomic_t nr_async_pages;
 extern struct inode swapper_inode;
 extern atomic_t page_cache_size;
@@ -168,12 +168,12 @@
 /*
  * Helper macros for lru_pages handling.
  */
-#define lru_cache_add(page, swap_cache) \
-do { \
-	spin_lock(&pagemap_lru_lock); \
-	list_add(&(page)->lru, !(swap_cache) ? &lru_cache:&lru_swap_cache); \
-	nr_lru_pages++; \
-	spin_unlock(&pagemap_lru_lock); \
+#define lru_cache_add(page) \
+do { \
+	spin_lock(&pagemap_lru_lock); \
+	list_add(&(page)->lru, &lru_cache); \
+	nr_lru_pages++; \
+	spin_unlock(&pagemap_lru_lock); \
 } while (0)
 
 #define lru_cache_del(page) \
diff -ur 2.3.12-lruswap/mm/filemap.c 2.3.12-lru/mm/filemap.c
--- 2.3.12-lruswap/mm/filemap.c	Wed Aug 4 19:17:23 1999
+++ 2.3.12-lru/mm/filemap.c	Wed Aug 4 19:23:14 1999
@@ -223,22 +223,25 @@
 
 extern atomic_t too_many_dirty_buffers;
 
-static inline int shrink_mmap_lru(struct list_head * lru, int * count,
-				  int gfp_mask)
+int shrink_mmap(int priority, int gfp_mask)
 {
-	int ret = 0;
+	int ret = 0, count;
 	LIST_HEAD(young);
 	LIST_HEAD(old);
 	LIST_HEAD(forget);
 	struct list_head * page_lru, * dispose;
 	struct page * page;
 
-	while (*count > 0 && (page_lru = lru->prev) != lru)
+	count = nr_lru_pages / (priority+1);
+
+	spin_lock(&pagemap_lru_lock);
+
+	while (count > 0 && (page_lru = lru_cache.prev) != &lru_cache)
 	{
 		page = list_entry(page_lru, struct page, lru);
 		list_del(page_lru);
 
-		dispose = lru;
+		dispose = &lru_cache;
 		if (test_and_clear_bit(PG_referenced, &page->flags))
 			/* Roll the page at the top of the lru list,
 			 * we could also be more aggressive putting
@@ -252,7 +255,7 @@
 		if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
 			goto dispose_continue;
 
-		(*count)--;
+		count--;
 
 		dispose = &young;
 		if (TryLockPage(page))
@@ -362,23 +365,8 @@
 
 	nr_lru_pages--;
 out:
-	list_splice(&young, lru);
-	list_splice(&old, lru->prev);
-
-	return ret;
-}
-
-int shrink_mmap(int priority, int gfp_mask)
-{
-	int ret = 0, count, i;
-	struct list_head * lru[2] = { &lru_swap_cache, &lru_cache, };
-
-	count = nr_lru_pages / (priority+1);
-
-	spin_lock(&pagemap_lru_lock);
-
-	for (i=0; count > 0 && !ret && i<2; i++)
-		ret = shrink_mmap_lru(lru[i], &count, gfp_mask);
+	list_splice(&young, &lru_cache);
+	list_splice(&old, lru_cache.prev);
 
 	spin_unlock(&pagemap_lru_lock);
 
@@ -506,7 +494,7 @@
 	page->offset = offset;
 	add_page_to_inode_queue(inode, page);
 	__add_page_to_hash_queue(page, hash);
-	lru_cache_add(page, PageSwapCache(page));
+	lru_cache_add(page);
 }
 
 void add_to_page_cache(struct page * page, struct inode * inode, unsigned long offset)
diff -ur 2.3.12-lruswap/mm/page_alloc.c 2.3.12-lru/mm/page_alloc.c
--- 2.3.12-lruswap/mm/page_alloc.c	Wed Aug 4 19:17:23 1999
+++ 2.3.12-lru/mm/page_alloc.c	Wed Aug 4 19:16:33 1999
@@ -22,7 +22,6 @@
 int nr_free_pages = 0;
 int nr_lru_pages;
 LIST_HEAD(lru_cache);
-LIST_HEAD(lru_swap_cache);
 
 /*
  * Free area management
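
Not part of the patch, just for illustration: a minimal userspace sketch of the single-LRU second-chance scan that shrink_mmap() performs after this change. The names (lru_cache, nr_lru_pages, shrink_mmap, the count = nr_lru_pages / (priority+1) heuristic) are borrowed from the patch; the young/old dispose lists, page locking, gfp_mask/DMA checks and the actual buffer/page-cache freeing are all left out, so this only models the list handling.

/*
 * Illustration only -- not kernel code.  Models the post-patch
 * shrink_mmap() scan: start from the cold end of one LRU list,
 * give referenced pages a second chance by rolling them back to
 * the hot end, and reclaim at most nr_lru_pages / (priority + 1)
 * of the rest.
 */
#include <stdio.h>
#include <stdlib.h>

struct page {
	int id;
	int referenced;			/* stands in for PG_referenced */
	struct page *prev, *next;
};

/* Circular doubly-linked list head, like the kernel's lru_cache. */
static struct page lru_cache = { -1, 0, &lru_cache, &lru_cache };
static int nr_lru_pages;

static void lru_cache_add(struct page *p)	/* add at the hot end */
{
	p->next = lru_cache.next;
	p->prev = &lru_cache;
	lru_cache.next->prev = p;
	lru_cache.next = p;
	nr_lru_pages++;
}

static void lru_cache_del(struct page *p)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
	nr_lru_pages--;
}

static int shrink_mmap(int priority)
{
	int ret = 0;
	int count = nr_lru_pages / (priority + 1);

	while (count-- > 0 && lru_cache.prev != &lru_cache) {
		struct page *page = lru_cache.prev;	/* coldest page */

		lru_cache_del(page);
		if (page->referenced) {
			/* Second chance: roll it back to the hot end. */
			page->referenced = 0;
			lru_cache_add(page);
			continue;
		}
		printf("reclaimed page %d\n", page->id);
		free(page);
		ret++;
	}
	return ret;
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		struct page *p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->id = i;
		p->referenced = (i & 1);	/* pretend odd pages were touched */
		lru_cache_add(p);
	}
	printf("freed %d of 8 pages\n", shrink_mmap(0));
	return 0;
}

Walking from lru_cache.prev means the scan always starts at the least recently added page, and rotating referenced pages to the head is what gives recently touched pages their reprieve; with the swap cache folded into the same list, file-backed and swap-backed pages now age against each other under this one policy.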