diff -ur linux-2.4.0/drivers/block/ll_rw_blk.c linux-2.4.0-loop/drivers/block/ll_rw_blk.c
--- linux-2.4.0/drivers/block/ll_rw_blk.c	Mon Jan  8 01:31:02 2001
+++ linux-2.4.0-loop/drivers/block/ll_rw_blk.c	Mon Jan  8 01:31:27 2001
@@ -731,9 +731,7 @@
 	 * driver. Create a bounce buffer if the buffer data points into
 	 * high memory - keep the original buffer otherwise.
 	 */
-#if CONFIG_HIGHMEM
 	bh = create_bounce(rw, bh);
-#endif
 
 /* look for a free request. */
 	/*
diff -ur linux-2.4.0/drivers/block/loop.c linux-2.4.0-loop/drivers/block/loop.c
--- linux-2.4.0/drivers/block/loop.c	Fri Dec 29 23:07:21 2000
+++ linux-2.4.0-loop/drivers/block/loop.c	Mon Jan  8 01:31:27 2001
@@ -31,6 +31,9 @@
  *   max_loop=<1-255> to the kernel on boot.
  * Erik I. Bolsø, , Oct 31, 1999
  *
+ * Rip out request handling and turn loop into a block remapper.
+ * Jens Axboe , Nov 2000
+ *
  * Still To Fix:
  * - Advisory locking is ignored here.
  * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
@@ -56,7 +59,9 @@
 #include 
 #include 
 #include 
-
+#include 
+#include 
+#include 
 #include 
 
 #include 
@@ -66,40 +71,27 @@
 #define MAJOR_NR LOOP_MAJOR
 
-#define DEVICE_NAME "loop"
-#define DEVICE_REQUEST do_lo_request
-#define DEVICE_NR(device) (MINOR(device))
-#define DEVICE_ON(device)
-#define DEVICE_OFF(device)
-#define DEVICE_NO_RANDOM
-#define TIMEOUT_VALUE (6 * HZ)
-#include 
-
 #include 
 
 static int max_loop = 8;
 static struct loop_device *loop_dev;
 static int *loop_sizes;
 static int *loop_blksizes;
 static devfs_handle_t devfs_handle;	/* For the directory */
+static kmem_cache_t *loop_bhp;
 
-#define FALSE 0
-#define TRUE (!FALSE)
-
-/*
- * Transfer functions
- */
 static int transfer_none(struct loop_device *lo, int cmd, char *raw_buf,
-		    char *loop_buf, int size, int real_block)
+			 char *loop_buf, int size, int real_block)
 {
 	if (cmd == READ)
 		memcpy(loop_buf, raw_buf, size);
 	else
 		memcpy(raw_buf, loop_buf, size);
+
 	return 0;
 }
 
 static int transfer_xor(struct loop_device *lo, int cmd, char *raw_buf,
-		   char *loop_buf, int size, int real_block)
+			char *loop_buf, int size, int real_block)
 {
 	char *in, *out, *key;
 	int i, keysize;
@@ -111,17 +103,18 @@
 		in = loop_buf;
 		out = raw_buf;
 	}
+
 	key = lo->lo_encrypt_key;
 	keysize = lo->lo_encrypt_key_size;
-	for (i=0; i < size; i++)
+	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
 	return 0;
 }
 
 static int none_status(struct loop_device *lo, struct loop_info *info)
 {
-	return 0; 
-} 
+	return 0;
+}
 
 static int xor_status(struct loop_device *lo, struct loop_info *info)
 {
@@ -133,7 +126,7 @@
 struct loop_func_table none_funcs = {
 	number: LO_CRYPT_NONE,
 	transfer: transfer_none,
-	init: none_status
+	init: none_status,
 };
 
 struct loop_func_table xor_funcs = {
@@ -168,8 +161,7 @@
 	loop_sizes[lo->lo_number] = size;
 }
 
-static int lo_send(struct loop_device *lo, char *data, int len, loff_t pos,
-		   int blksize)
+static int lo_send(struct loop_device *lo, char *data, int len, loff_t pos)
 {
 	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
 	struct address_space *mapping = lo->lo_dentry->d_inode->i_mapping;
@@ -182,7 +174,7 @@
 	index = pos >> PAGE_CACHE_SHIFT;
 	offset = pos & (PAGE_CACHE_SIZE - 1);
 	while (len > 0) {
-		int IV = index * (PAGE_CACHE_SIZE/blksize) + offset/blksize;
+		int IV = index * (PAGE_CACHE_SIZE/lo->lo_blksize) + offset/lo->lo_blksize;
 		size = PAGE_CACHE_SIZE - offset;
 		if (size > len)
 			size = len;
@@ -190,10 +182,13 @@
 		page = grab_cache_page(mapping, index);
 		if (!page)
 			goto fail;
+		if (!PageLocked(page))
+			PAGE_BUG(page);
 		if (aops->prepare_write(file, page, offset, offset+size))
 			goto unlock;
 		kaddr = page_address(page);
-		if ((lo->transfer)(lo, WRITE, kaddr+offset, data, size, IV))
+		flush_dcache_page(page);
+		if (lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV))
 			goto write_fail;
 		if (aops->commit_write(file, page, offset, offset+size))
 			goto unlock;
@@ -203,6 +198,7 @@
 		index++;
 		pos += size;
 		UnlockPage(page);
+		deactivate_page(page);
 		page_cache_release(page);
 	}
 	return 0;
@@ -213,6 +209,7 @@
 	kunmap(page);
 unlock:
 	UnlockPage(page);
+	deactivate_page(page);
 	page_cache_release(page);
 fail:
 	return -1;
@@ -221,7 +218,6 @@
 struct lo_read_data {
 	struct loop_device *lo;
 	char *data;
-	int blksize;
 };
 
 static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
@@ -230,16 +226,15 @@
 	unsigned long count = desc->count;
 	struct lo_read_data *p = (struct lo_read_data*)desc->buf;
 	struct loop_device *lo = p->lo;
-	int IV = page->index * (PAGE_CACHE_SIZE/p->blksize) + offset/p->blksize;
+	int IV = page->index * (PAGE_CACHE_SIZE/lo->lo_blksize) + offset/lo->lo_blksize;
 
 	if (size > count)
 		size = count;
 
 	kaddr = kmap(page);
-	if ((lo->transfer)(lo,READ,kaddr+offset,p->data,size,IV)) {
+	if (lo_do_transfer(lo, READ, kaddr + offset, p->data, size, IV)) {
 		size = 0;
-		printk(KERN_ERR "loop: transfer error block %ld\n",
-		       page->index);
+		printk(KERN_ERR "loop: transfer error block %ld\n",page->index);
 		desc->error = -EINVAL;
 	}
 	kunmap(page);
@@ -250,8 +245,7 @@
 	return size;
 }
 
-static int lo_receive(struct loop_device *lo, char *data, int len, loff_t pos,
-		      int blksize)
+static int lo_receive(struct loop_device *lo, char *data, int len, loff_t pos)
 {
 	struct file *file = lo->lo_backing_file;
 	struct lo_read_data cookie;
@@ -259,7 +253,6 @@
 
 	cookie.lo = lo;
 	cookie.data = data;
-	cookie.blksize = blksize;
 	desc.written = 0;
 	desc.count = len;
 	desc.buf = (char*)&cookie;
@@ -268,135 +261,144 @@
 	return desc.error;
 }
 
-static void do_lo_request(request_queue_t * q)
+static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
 {
-	int	block, offset, len, blksize, size;
-	char	*dest_addr;
-	struct loop_device *lo;
-	struct buffer_head *bh;
-	struct request *current_request;
 	loff_t pos;
+	int ret;
 
-repeat:
-	INIT_REQUEST;
-	current_request=CURRENT;
-	blkdev_dequeue_request(current_request);
-	if (MINOR(current_request->rq_dev) >= max_loop)
-		goto error_out;
-	lo = &loop_dev[MINOR(current_request->rq_dev)];
-	if (!lo->lo_dentry || !lo->transfer)
-		goto error_out;
-	if (current_request->cmd == WRITE) {
-		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
-			goto error_out;
-	} else if (current_request->cmd != READ) {
-		printk(KERN_ERR "unknown loop device command (%d)?!?",
-		       current_request->cmd);
-		goto error_out;
-	}
+	pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;
 
-	dest_addr = current_request->buffer;
-	len = current_request->current_nr_sectors << 9;
+	if (rw == WRITE)
+		ret = lo_send(lo, bh->b_data, bh->b_size, pos);
+	else
+		ret = lo_receive(lo, bh->b_data, bh->b_size, pos);
 
-	blksize = BLOCK_SIZE;
-	if (blksize_size[MAJOR(lo->lo_device)]) {
-		blksize = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];
-		if (!blksize)
-			blksize = BLOCK_SIZE;
-	}
+	return ret;
+}
 
-	if (lo->lo_flags & LO_FLAGS_DO_BMAP)
-		goto file_backed;
+static void loop_delete_buffer(struct buffer_head *bh)
+{
+	if (PageHighMem(bh->b_page))
+		kunmap(bh->b_page);
 
-	if (blksize < 512) {
-		block = current_request->sector * (512/blksize);
-		offset = 0;
-	} else {
-		block = current_request->sector / (blksize >> 9);
-		offset = (current_request->sector % (blksize >> 9)) << 9;
-	}
-	block += lo->lo_offset / blksize;
-	offset += lo->lo_offset % blksize;
-	if (offset >= blksize) {
-		block++;
-		offset -= blksize;
-	}
-	spin_unlock_irq(&io_request_lock);
+	__free_page(bh->b_page);
+	kmem_cache_free(loop_bhp, bh);
+}
 
-	while (len > 0) {
+/*
+ * when buffer i/o has completed, invoke transfer func
+ */
+static void loop_end_io_transfer(struct buffer_head *bh, int uptodate)
+{
+	struct loop_device *lo = &loop_dev[MINOR(bh->b_dev)];
+	struct buffer_head *rbh = bh->b_private;
+	int rw = test_and_clear_bit(BH_Dirty, &bh->b_state);
 
-		size = blksize - offset;
-		if (size > len)
-			size = len;
+	if (rw == READ && uptodate)
+		uptodate = !lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,
+					   bh->b_size, bh->b_blocknr);
 
-		bh = getblk(lo->lo_device, block, blksize);
-		if (!bh) {
-			printk(KERN_ERR "loop: device %s: getblk(-, %d, %d) returned NULL",
-			       kdevname(lo->lo_device),
-			       block, blksize);
-			goto error_out_lock;
-		}
-		if (!buffer_uptodate(bh) && ((current_request->cmd == READ) ||
-					(offset || (len < blksize)))) {
-			ll_rw_block(READ, 1, &bh);
-			wait_on_buffer(bh);
-			if (!buffer_uptodate(bh)) {
-				brelse(bh);
-				goto error_out_lock;
-			}
-		}
+	rbh->b_end_io(rbh, uptodate);
+	loop_delete_buffer(bh);
+}
 
-		if ((lo->transfer)(lo, current_request->cmd,
-				   bh->b_data + offset,
-				   dest_addr, size, block)) {
-			printk(KERN_ERR "loop: transfer error block %d\n",
-			       block);
-			brelse(bh);
-			goto error_out_lock;
-		}
+static struct buffer_head *loop_new_buffer(struct loop_device *lo,
+					   struct buffer_head *rbh)
+{
+	struct buffer_head *bh;
 
-		if (current_request->cmd == WRITE) {
-			mark_buffer_uptodate(bh, 1);
-			mark_buffer_dirty(bh);
-		}
-		brelse(bh);
-		dest_addr += size;
-		len -= size;
-		offset = 0;
-		block++;
+	/*
+	 * any better ideas? one could argue that since
+	 * (BUF_BUFFER & __GFP_WAIT) kmem_cache_alloc should
+	 * always return an object
+	 */
+	do {
+		if ((bh = kmem_cache_alloc(loop_bhp, SLAB_BUFFER)))
+			break;
+		run_task_queue(&tq_disk);
+		schedule_timeout(HZ);
+	} while (1);
+	memset(bh, 0, sizeof(*bh));
+
+	bh->b_size = rbh->b_size;
+	bh->b_rdev = lo->lo_device;
+	bh->b_state = (1 << BH_Req) | (1 << BH_Mapped) | (1 << BH_Lock);
+
+	/*
+	 * easy way out, although it does waste some memory for < PAGE_SIZE
+	 * blocks...
+	 */
+	bh->b_page = alloc_page(GFP_BUFFER);
+	if (PageHighMem(bh->b_page))
+		bh->b_data = kmap(bh->b_page);
+	else
+		bh->b_data = page_address(bh->b_page);
+
+	bh->b_end_io = loop_end_io_transfer;
+	bh->b_private = rbh;
+	bh->b_rsector = rbh->b_rsector + (lo->lo_offset >> 9);
+	init_waitqueue_head(&bh->b_wait);
+
+	return bh;
+}
+
+static int loop_make_request(request_queue_t *q, int rw, struct buffer_head *rbh)
+{
+	struct buffer_head *bh;
+	struct loop_device *lo;
+
+	if (MINOR(rbh->b_dev) >= max_loop)
+		goto err;
+
+	lo = &loop_dev[MINOR(rbh->b_dev)];
+	if (!lo->lo_dentry)
+		goto err;
+
+	if (rw == WRITE) {
+		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
+			goto err;
+	} else if (rw == READA) {
+		rw = READ;
+	} else if (rw != READ) {
+		printk(KERN_ERR "loop: unknown command (%d)\n", rw);
+		goto err;
+	}
+
+	rbh = create_bounce(rw, rbh);
+
+	/*
+	 * file backed
+	 */
+	if (lo->lo_flags & LO_FLAGS_DO_BMAP) {
+		if (do_bh_filebacked(lo, rbh, rw))
+			goto err;
+		rbh->b_end_io(rbh, 1);
+		return 0;
 	}
-	goto done;
 
-file_backed:
-	pos = ((loff_t)current_request->sector << 9) + lo->lo_offset;
-	spin_unlock_irq(&io_request_lock);
-	if (current_request->cmd == WRITE) {
-		if (lo_send(lo, dest_addr, len, pos, blksize))
-			goto error_out_lock;
-	} else {
-		if (lo_receive(lo, dest_addr, len, pos, blksize))
-			goto error_out_lock;
+	/*
+	 * piggy old buffer on original, and submit for I/O
	 */
+	bh = loop_new_buffer(lo, rbh);
+	if (rw == WRITE) {
+		set_bit(BH_Dirty, &bh->b_state);
+		if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data, bh->b_size, rbh->b_rsector))
+			goto err;
 	}
-done:
-	spin_lock_irq(&io_request_lock);
-	current_request->sector += current_request->current_nr_sectors;
-	current_request->nr_sectors -= current_request->current_nr_sectors;
-	list_add(&current_request->queue, &q->queue_head);
-	end_request(1);
-	goto repeat;
-error_out_lock:
-	spin_lock_irq(&io_request_lock);
-error_out:
-	list_add(&current_request->queue, &q->queue_head);
-	end_request(0);
-	goto repeat;
+
+	generic_make_request(rw, bh);
+	return 0;
+
+err:
+	buffer_IO_error(rbh);
+	return 0;
 }
 
 static int loop_set_fd(struct loop_device *lo, kdev_t dev, unsigned int arg)
 {
 	struct file	*file;
 	struct inode	*inode;
-	int error;
+	int error, bs;
 
 	MOD_INC_USE_COUNT;
@@ -413,6 +415,9 @@
 	inode = file->f_dentry->d_inode;
 
 	if (S_ISBLK(inode->i_mode)) {
+		if (MAJOR(inode->i_rdev) == LOOP_MAJOR)
+			goto out;
+
 		/* dentry will be wired, so... */
 		error = blkdev_get(inode->i_bdev, file->f_mode,
 				   file->f_flags, BDEV_FILE);
@@ -429,6 +434,9 @@
 	} else if (S_ISREG(inode->i_mode)) {
 		struct address_space_operations *aops;
 
+		if (MAJOR(inode->i_rdev) == LOOP_MAJOR)
+			goto out;
+
 		aops = inode->i_mapping->a_ops;
 		/*
 		 * If we can't read - sorry. If we only can't write - well,
@@ -478,11 +486,22 @@
 	if (IS_RDONLY (inode) || is_read_only(lo->lo_device))
 		lo->lo_flags |= LO_FLAGS_READ_ONLY;
 
+	bs = 0;
+	if (blksize_size[MAJOR(inode->i_rdev)])
+		bs = blksize_size[MAJOR(inode->i_rdev)][MINOR(inode->i_rdev)];
+	if (!bs)
+		bs = BLOCK_SIZE;
+
+	lo->lo_blksize = bs;
+	set_blocksize(dev, bs);
+
 	set_device_ro(dev, (lo->lo_flags & LO_FLAGS_READ_ONLY)!=0);
 
 	lo->lo_dentry = dget(file->f_dentry);
 	lo->transfer = NULL;
 	lo->ioctl = NULL;
+	lo->old_gfp_mask = inode->i_mapping->gfp_mask;
+	inode->i_mapping->gfp_mask = GFP_BUFFER;
 	figure_loop_size(lo);
 
 out_putf:
@@ -526,6 +545,7 @@
 static int loop_clr_fd(struct loop_device *lo, kdev_t dev)
 {
 	struct dentry *dentry = lo->lo_dentry;
+	int gfp = lo->old_gfp_mask;
 
 	if (!dentry)
 		return -ENXIO;
@@ -558,6 +578,7 @@
 	memset(lo->lo_name, 0, LO_NAME_SIZE);
 	loop_sizes[lo->lo_number] = 0;
 	invalidate_buffers(dev);
+	dentry->d_inode->i_mapping->gfp_mask = gfp;
 	MOD_DEC_USE_COUNT;
 	return 0;
 }
@@ -732,11 +753,8 @@
 /*
  * And now the modules code and kernel interface.
  */
-#ifdef MODULE
-#define loop_init init_module
 MODULE_PARM(max_loop, "i");
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-255)");
-#endif
 
 int loop_register_transfer(struct loop_func_table *funcs)
 {
@@ -767,32 +785,35 @@
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static void no_plug_device(request_queue_t *q, kdev_t device)
-{
-}
-
 int __init loop_init(void)
 {
 	int	i;
 
+	if ((max_loop < 1) || (max_loop > 255)) {
+		printk(KERN_WARNING "loop: invalid max_loop (must be between"
+				    " 1 and 255), using default (8)\n");
+		max_loop = 8;
+	}
+
 	if (devfs_register_blkdev(MAJOR_NR, "loop", &lo_fops)) {
-		printk(KERN_WARNING "Unable to get major number %d for loop device\n",
-		       MAJOR_NR);
+		printk(KERN_WARNING "Unable to get major number %d for loop"
+				    " device\n", MAJOR_NR);
 		return -EIO;
 	}
+
+	loop_bhp = kmem_cache_create("loop_buffers", sizeof(struct buffer_head),
+				     0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!loop_bhp) {
+		printk(KERN_WARNING "loop: unable to create slab cache\n");
+		return -ENOMEM;
+	}
+
 	devfs_handle = devfs_mk_dir (NULL, "loop", NULL);
 	devfs_register_series (devfs_handle, "%u", max_loop, DEVFS_FL_DEFAULT,
			       MAJOR_NR, 0, S_IFBLK | S_IRUSR | S_IWUSR |
			       S_IRGRP, &lo_fops, NULL);
 
-	if ((max_loop < 1) || (max_loop > 255)) {
-		printk (KERN_WARNING "loop: invalid max_loop (must be between 1 and 255), using default (8)\n");
-		max_loop = 8;
-	}
-
-	printk(KERN_INFO "loop: enabling %d loop devices\n", max_loop);
-
 	loop_dev = kmalloc (max_loop * sizeof(struct loop_device), GFP_KERNEL);
 	if (!loop_dev) {
 		printk (KERN_ERR "loop: Unable to create loop_dev\n");
@@ -814,9 +835,8 @@
 		return -ENOMEM;
 	}
 
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
-	blk_queue_pluggable(BLK_DEFAULT_QUEUE(MAJOR_NR), no_plug_device);
-	blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
+	blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), loop_make_request);
+
 	for (i=0; i < max_loop; i++) {
 		memset(&loop_dev[i], 0, sizeof(struct loop_device));
 		loop_dev[i].lo_number = i;
@@ -828,22 +848,24 @@
 	for (i=0; i < max_loop; i++)
 		register_disk(NULL, MKDEV(MAJOR_NR,i), 1, &lo_fops, 0);
 
+	printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
 	return 0;
 }
 
-#ifdef MODULE
-void cleanup_module(void)
+void loop_exit(void)
 {
+	kmem_cache_destroy(loop_bhp);
 	devfs_unregister (devfs_handle);
 	if (devfs_unregister_blkdev(MAJOR_NR, "loop") != 0)
 		printk(KERN_WARNING "loop: cannot unregister blkdev\n");
-	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 	kfree (loop_dev);
 	kfree (loop_sizes);
 	kfree (loop_blksizes);
 }
-#endif
+
+module_init(loop_init);
+module_exit(loop_exit);
 
 #ifndef MODULE
 
 static int __init max_loop_setup(char *str)
diff -ur linux-2.4.0/fs/buffer.c linux-2.4.0-loop/fs/buffer.c
--- linux-2.4.0/fs/buffer.c	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/fs/buffer.c	Mon Jan  8 01:31:27 2001
@@ -1074,6 +1074,10 @@
 
 	if (state < 0)
 		return;
+
+	if (state && (!dev || MAJOR(dev) == LOOP_MAJOR))
+		state = 0;
+
 	wakeup_bdflush(state);
 }
 
@@ -1624,6 +1628,8 @@
 	return err;
 }
 
+#define CAN_DO_IO(mapping)	(!(mapping) || ((mapping)->gfp_mask & __GFP_IO))
+
 static int __block_commit_write(struct inode *inode, struct page *page,
 		unsigned from, unsigned to)
 {
@@ -1646,13 +1652,15 @@
 			if (!atomic_set_buffer_dirty(bh)) {
 				__mark_dirty(bh);
 				buffer_insert_inode_queue(bh, inode);
-				need_balance_dirty = 1;
+				if (CAN_DO_IO(inode->i_mapping))
+					need_balance_dirty = 1;
 			}
 		}
 	}
 
 	if (need_balance_dirty)
 		balance_dirty(bh->b_dev);
+
 	/*
 	 * is this a partial write that happened to make all buffers
 	 * uptodate then we can optimize away a bogus readpage() for
diff -ur linux-2.4.0/fs/inode.c linux-2.4.0-loop/fs/inode.c
--- linux-2.4.0/fs/inode.c	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/fs/inode.c	Mon Jan  8 01:31:27 2001
@@ -607,6 +607,7 @@
 	inode->i_bdev = NULL;
 	inode->i_data.a_ops = &empty_aops;
 	inode->i_data.host = inode;
+	inode->i_data.gfp_mask = GFP_HIGHUSER;
 	inode->i_mapping = &inode->i_data;
 }
 
diff -ur linux-2.4.0/fs/nfs/dir.c linux-2.4.0-loop/fs/nfs/dir.c
--- linux-2.4.0/fs/nfs/dir.c	Sun Dec 10 18:55:48 2000
+++ linux-2.4.0-loop/fs/nfs/dir.c	Mon Jan  8 01:31:27 2001
@@ -321,7 +321,7 @@
 		desc->page = NULL;
 	}
 
-	page = page_cache_alloc();
+	page = page_cache_alloc(NULL);
 	if (!page) {
 		status = -ENOMEM;
 		goto out;
diff -ur linux-2.4.0/include/linux/fs.h linux-2.4.0-loop/include/linux/fs.h
--- linux-2.4.0/include/linux/fs.h	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/include/linux/fs.h	Mon Jan  8 01:31:27 2001
@@ -372,6 +372,7 @@
 	struct vm_area_struct	*i_mmap;	/* list of private mappings */
 	struct vm_area_struct	*i_mmap_shared; /* list of shared mappings */
 	spinlock_t		i_shared_lock;  /* and spinlock protecting it */
+	int			gfp_mask;	/* how to allocate the pages */
 };
 
 struct block_device {
diff -ur linux-2.4.0/include/linux/highmem.h linux-2.4.0-loop/include/linux/highmem.h
--- linux-2.4.0/include/linux/highmem.h	Thu Jan  4 23:50:47 2001
+++ linux-2.4.0-loop/include/linux/highmem.h	Mon Jan  8 01:31:27 2001
@@ -40,6 +40,8 @@
 #define bh_kmap(bh)	((bh)->b_data)
 #define bh_kunmap(bh)	do { } while (0);
 
+#define create_bounce(rw, bh)	(bh)
+
 #endif /* CONFIG_HIGHMEM */
 
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
diff -ur linux-2.4.0/include/linux/loop.h linux-2.4.0-loop/include/linux/loop.h
--- linux-2.4.0/include/linux/loop.h	Mon Dec 11 21:50:30 2000
+++ linux-2.4.0-loop/include/linux/loop.h	Mon Jan  8 01:31:27 2001
@@ -39,12 +39,24 @@
 	struct file *	lo_backing_file;
 	void		*key_data;
 	char		key_reserved[48];  /* for use by the filter modules */
+
+	int		lo_blksize;
+	int		old_gfp_mask;
 };
 
 typedef	int (* transfer_proc_t)(struct loop_device *, int cmd,
				char *raw_buf, char *loop_buf, int size,
				int real_block);
 
+extern inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
+				 char *lbuf, int size, int rblock)
+{
+	if (!lo->transfer)
+		return 0;
+
+	return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
+}
+
 #endif /* __KERNEL__ */
 
 /*
@@ -102,9 +114,8 @@
 /* Support for loadable transfer modules */
 struct loop_func_table {
 	int number; 	/* filter type */
-	int (*transfer)(struct loop_device *lo, int cmd,
-			char *raw_buf, char *loop_buf, int size,
-			int real_block);
+	int (*transfer)(struct loop_device *lo, int cmd, char *raw_buf,
+			char *loop_buf, int size, int real_block);
 	int (*init)(struct loop_device *, struct loop_info *);
 	/* release is called from loop_unregister_transfer or clr_fd */
 	int (*release)(struct loop_device *);
diff -ur linux-2.4.0/include/linux/pagemap.h linux-2.4.0-loop/include/linux/pagemap.h
--- linux-2.4.0/include/linux/pagemap.h	Thu Jan  4 23:50:47 2001
+++ linux-2.4.0-loop/include/linux/pagemap.h	Mon Jan  8 01:31:27 2001
@@ -29,9 +29,13 @@
 #define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
 
 #define page_cache_get(x)	get_page(x)
-#define page_cache_alloc()	alloc_pages(GFP_HIGHUSER, 0)
 #define page_cache_free(x)	__free_page(x)
 #define page_cache_release(x)	__free_page(x)
+
+static inline struct page *page_cache_alloc(struct address_space *x)
+{
+	return alloc_pages(x ? x->gfp_mask : GFP_HIGHUSER, 0);
+}
 
 /*
  * From a kernel address, get the "struct page *"
diff -ur linux-2.4.0/kernel/ksyms.c linux-2.4.0-loop/kernel/ksyms.c
--- linux-2.4.0/kernel/ksyms.c	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/kernel/ksyms.c	Mon Jan  8 01:31:27 2001
@@ -120,6 +120,7 @@
 EXPORT_SYMBOL(kmap_high);
 EXPORT_SYMBOL(kunmap_high);
 EXPORT_SYMBOL(highmem_start_page);
+EXPORT_SYMBOL(create_bounce);
 #endif
 
 /* filesystem internal functions */
diff -ur linux-2.4.0/mm/filemap.c linux-2.4.0-loop/mm/filemap.c
--- linux-2.4.0/mm/filemap.c	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/mm/filemap.c	Mon Jan  8 01:31:27 2001
@@ -559,7 +559,7 @@
 	if (page)
 		return 0;
 
-	page = page_cache_alloc();
+	page = page_cache_alloc(mapping);
 	if (!page)
 		return -ENOMEM;
 
@@ -1178,7 +1178,7 @@
 		 */
 		if (!cached_page) {
 			spin_unlock(&pagecache_lock);
-			cached_page = page_cache_alloc();
+			cached_page = page_cache_alloc(mapping);
 			if (!cached_page) {
 				desc->error = -ENOMEM;
 				break;
@@ -1478,7 +1478,7 @@
 	 */
 	old_page = page;
 	if (no_share) {
-		struct page *new_page = page_cache_alloc();
+		struct page *new_page = page_cache_alloc(NULL);
 
 		if (new_page) {
 			copy_user_highpage(new_page, old_page, address);
@@ -2320,7 +2320,7 @@
 	page = __find_get_page(mapping, index, hash);
 	if (!page) {
 		if (!cached_page) {
-			cached_page = page_cache_alloc();
+			cached_page = page_cache_alloc(mapping);
 			if (!cached_page)
 				return ERR_PTR(-ENOMEM);
 		}
@@ -2383,7 +2383,7 @@
 	page = __find_lock_page(mapping, index, hash);
 	if (!page) {
 		if (!*cached_page) {
-			*cached_page = page_cache_alloc();
+			*cached_page = page_cache_alloc(mapping);
 			if (!*cached_page)
 				return NULL;
 		}
diff -ur linux-2.4.0/mm/memory.c linux-2.4.0-loop/mm/memory.c
--- linux-2.4.0/mm/memory.c	Mon Jan  8 01:31:03 2001
+++ linux-2.4.0-loop/mm/memory.c	Mon Jan  8 01:31:27 2001
@@ -862,7 +862,7 @@
 	 * Ok, we need to copy. Oh, well..
 	 */
 	spin_unlock(&mm->page_table_lock);
-	new_page = page_cache_alloc();
+	new_page = page_cache_alloc(NULL);
 	if (!new_page)
 		return -1;
 	spin_lock(&mm->page_table_lock);
diff -ur linux-2.4.0/mm/shmem.c linux-2.4.0-loop/mm/shmem.c
--- linux-2.4.0/mm/shmem.c	Fri Dec 29 23:21:48 2000
+++ linux-2.4.0-loop/mm/shmem.c	Mon Jan  8 01:31:27 2001
@@ -301,7 +301,7 @@
 	inode->i_sb->u.shmem_sb.free_blocks--;
 	spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);
 	/* Ok, get a new page */
-	page = page_cache_alloc();
+	page = page_cache_alloc(mapping);
 	if (!page)
 		goto oom;
 	clear_user_highpage(page, address);
@@ -316,7 +316,7 @@
 	up(&inode->i_sem);
 
 	if (no_share) {
-		struct page *new_page = page_cache_alloc();
+		struct page *new_page = page_cache_alloc(mapping);
 
 		if (new_page) {
 			copy_user_highpage(new_page, page, address);
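
The heart of the change is loop_make_request() above: with blk_queue_make_request() the loop driver no longer owns a request queue at all, it just redirects each incoming buffer_head, either into the page cache for file-backed loops or onto the underlying device via a private buffer_head it allocates from the loop_buffers slab. As a rough illustration of the general 2.4 remapping convention this builds on (a sketch, not code from the patch; example_remap_request, backing_dev and sector_offset are hypothetical names), a minimal make_request function for a pure remapper could look like this:

/*
 * Illustrative sketch only, not part of the patch above: the usual shape
 * of a 2.4 remapping make_request function installed with
 * blk_queue_make_request().  backing_dev and sector_offset are assumed
 * to be set up elsewhere by this hypothetical driver.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>

static kdev_t backing_dev;		/* device the I/O is redirected to */
static unsigned long sector_offset;	/* start of mapped region, in 512-byte sectors */

static int example_remap_request(request_queue_t *q, int rw,
				 struct buffer_head *bh)
{
	/* point the buffer at the backing device ... */
	bh->b_rdev = backing_dev;
	/* ... shifted to the mapped location (b_rsector is in 512-byte units) */
	bh->b_rsector += sector_offset;

	/*
	 * A non-zero return asks generic_make_request() to re-resolve the
	 * queue for the remapped bh->b_rdev and hand the buffer on to it.
	 */
	return 1;
}

loop_make_request() returns 0 instead of using that shortcut, because in the device-backed case it must build its own buffer_head (so the transfer function can encrypt into a private page) and it submits that buffer explicitly with generic_make_request().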