--- 2.4.0-test1-ac22-classzone-VM/fs/buffer.c.~1~	Tue Jun 20 00:55:39 2000
+++ 2.4.0-test1-ac22-classzone-VM/fs/buffer.c	Wed Jun 21 03:16:38 2000
@@ -2379,25 +2379,6 @@
 #define BUFFER_BUSY_BITS	((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
 #define buffer_busy(bh)	(atomic_read(&(bh)->b_count) | ((bh)->b_state & BUFFER_BUSY_BITS))
 
-static int sync_page_buffers(struct buffer_head * bh)
-{
-	struct buffer_head * tmp = bh;
-
-	do {
-		if (buffer_dirty(tmp) && !buffer_locked(tmp))
-			ll_rw_block(WRITE, 1, &tmp);
-		tmp = tmp->b_this_page;
-	} while (tmp != bh);
-
-	do {
-		if (buffer_busy(tmp))
-			return 1;
-		tmp = tmp->b_this_page;
-	} while (tmp != bh);
-
-	return 0;
-}
-
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
  * are unused, and free's the page if so.
@@ -2414,7 +2395,6 @@
 	struct buffer_head * tmp, * bh = page->buffers;
 	int index = BUFSIZE_INDEX(bh->b_size);
 
-again:
 	spin_lock(&lru_list_lock);
 	write_lock(&hash_table_lock);
 	spin_lock(&free_list[index].lock);
@@ -2460,8 +2440,7 @@
 	spin_unlock(&free_list[index].lock);
 	write_unlock(&hash_table_lock);
 	spin_unlock(&lru_list_lock);
-	if (!sync_page_buffers(bh))
-		goto again;
+	wakeup_bdflush(0);
 	return 0;
 }
 
--- 2.4.0-test1-ac22-classzone-VM/include/linux/mmzone.h.~1~	Tue Jun 20 00:55:47 2000
+++ 2.4.0-test1-ac22-classzone-VM/include/linux/mmzone.h	Tue Jun 20 17:21:55 2000
@@ -41,7 +41,6 @@
 	unsigned long pages_min, pages_low, pages_high;
 	int nr_zone;
 	char zone_wake_kswapd;
-	atomic_t free_before_allocate;
 
 	/*
 	 * free areas of different sizes
--- 2.4.0-test1-ac22-classzone-VM/mm/page_alloc.c.~1~	Tue Jun 20 00:55:48 2000
+++ 2.4.0-test1-ac22-classzone-VM/mm/page_alloc.c	Tue Jun 20 17:21:44 2000
@@ -257,13 +257,6 @@
 	if (current->flags & PF_MEMALLOC)
 		goto allocate_ok;
 
-	/* Somebody needs to free pages so we free some of our own. */
-	if (atomic_read(&classzone->free_before_allocate)) {
-		spin_unlock_irqrestore(freelist_lock, flags);
-		try_to_free_pages(gfpmask_zone->gfp_mask, classzone);
-		spin_lock_irq(freelist_lock);
-	}
-
 	/* classzone based memory balancing */
 	free_pages = classzone->classzone_free_pages;
 	if (free_pages > classzone->pages_low) {
@@ -296,9 +289,7 @@
 		goto allocate_ok;
 
 	spin_unlock_irqrestore(freelist_lock, flags);
-	atomic_inc(&classzone->free_before_allocate);
 	freed = try_to_free_pages(gfpmask_zone->gfp_mask, classzone);
-	atomic_dec(&classzone->free_before_allocate);
 	spin_lock_irq(freelist_lock);
 
 	if (freed || gfpmask_zone->gfp_mask & __GFP_HIGH)
@@ -598,7 +589,6 @@
 		zone->free_pages = 0;
 		zone->zone_wake_kswapd = 0;
 		zone->classzone_free_pages = 0;
-		atomic_set(&zone->free_before_allocate, 0);
 		if (!size)
 			continue;
 		pgdat->nr_zones = j+1;