# This is a BitKeeper generated patch for the following project: # Project Name: Linux kernel tree # This patch format is intended for GNU patch command version 2.5 or higher. # This patch includes the following deltas: # ChangeSet 1.822 -> 1.823 # fs/bio.c 1.33 -> 1.34 # include/linux/bio.h 1.24 -> 1.25 # # The following is the BitKeeper ChangeSet Log # -------------------------------------------- # 02/10/28 axboe@burns.home.kernel.dk 1.823 # Add bio_map_user(). This maps a range of user space memory into a # bio, suitable for block device io. # -------------------------------------------- # diff -Nru a/fs/bio.c b/fs/bio.c --- a/fs/bio.c Mon Oct 28 18:49:48 2002 +++ b/fs/bio.c Mon Oct 28 18:49:48 2002 @@ -447,6 +447,131 @@ } /** + * bio_map_user - map user address into bio + * @bdev: destination block device + * @uaddr: start of user address + * @len: length in bytes + * @write_to_vm: bool indicating writing to pages or not + * + * Map the user space address into a bio suitable for io to a block + * device. Caller should check the size of the returned bio, we might + * not have mapped the entire range specified. 
+ */
+struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
+ unsigned int len, int write_to_vm)
+{
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ request_queue_t *q = bdev_get_queue(bdev);
+ int ret, offset, i;
+ struct page **pages;
+ struct bio *bio;
+
+ /*
+ * transfer and buffer must be aligned to at least hardsector
+ * size for now, in the future we can relax this restriction
+ */
+ if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ return NULL;
+
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
+ if (!bio)
+ return NULL;
+
+ pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out;
+
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, uaddr, nr_pages,
+ write_to_vm, 0, pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (ret < nr_pages)
+ goto out;
+
+ bio->bi_bdev = bdev;
+
+ offset = uaddr & ~PAGE_MASK;
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ /*
+ * sorry...
+ */
+ if (bio_add_page(bio, pages[i], bytes, offset) < bytes)
+ break;
+
+ len -= bytes;
+ offset = 0;
+ }
+
+ /*
+ * release the pages we didn't map into the bio, if any
+ */
+ while (i < nr_pages)
+ page_cache_release(pages[i++]);
+
+ kfree(pages);
+
+ /*
+ * check if the mapped pages need bouncing for an isa host.
+ */
+ blk_queue_bounce(q, &bio);
+ return bio;
+out:
+ kfree(pages);
+ bio_put(bio);
+ return NULL;
+}
+
+/**
+ * bio_unmap_user - unmap a bio
+ * @bio: the bio being unmapped
+ * @write_to_vm: bool indicating whether pages were written to
+ *
+ * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
+ * must be the same as passed into bio_map_user(). Must be called with
+ * a process context.
+ */
+void bio_unmap_user(struct bio *bio, int write_to_vm)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ /*
+ * find original bio if it was bounced
+ */
+ if (bio->bi_private) {
+ /*
+ * someone stole our bio, must not happen
+ */
+ BUG_ON(!bio_flagged(bio, BIO_BOUNCED));
+
+ bio = bio->bi_private;
+ }
+
+ /*
+ * make sure we dirty pages we wrote to
+ */
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ if (write_to_vm)
+ set_page_dirty(bvec->bv_page);
+
+ page_cache_release(bvec->bv_page);
+ }
+
+ bio_put(bio);
+}
+
+/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 * @bytes_done: number of bytes completed
@@ -560,3 +685,5 @@
 EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
+EXPORT_SYMBOL(bio_map_user);
+EXPORT_SYMBOL(bio_unmap_user);
diff -Nru a/include/linux/bio.h b/include/linux/bio.h
--- a/include/linux/bio.h Mon Oct 28 18:49:48 2002
+++ b/include/linux/bio.h Mon Oct 28 18:49:48 2002
@@ -132,6 +132,7 @@
 #define bio_page(bio) bio_iovec((bio))->bv_page
 #define bio_offset(bio) bio_iovec((bio))->bv_offset
 #define bio_sectors(bio) ((bio)->bi_size >> 9)
+#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
 #define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
 
 #define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 
@@ -215,6 +216,9 @@
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
+extern struct bio *bio_map_user(struct block_device *, unsigned long,
+ unsigned int, int);
+extern void bio_unmap_user(struct bio *, int);
 
#ifdef CONFIG_HIGHMEM
/*