diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/Makefile linux/drivers/block/Makefile --- /opt/kernel/linux-2.5.1-pre5/drivers/block/Makefile Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/Makefile Wed Dec 5 09:08:27 2001 @@ -10,9 +10,9 @@ O_TARGET := block.o -export-objs := elevator.o ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o +export-objs := elevator.o ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o block_ioctl.o -obj-y := elevator.o ll_rw_blk.o blkpg.o genhd.o +obj-y := elevator.o ll_rw_blk.o blkpg.o genhd.o block_ioctl.o obj-$(CONFIG_MAC_FLOPPY) += swim3.o obj-$(CONFIG_BLK_DEV_FD) += floppy.o diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/blkpg.c linux/drivers/block/blkpg.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/blkpg.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/blkpg.c Wed Dec 5 09:37:37 2001 @@ -194,7 +194,7 @@ /* * Common ioctl's for block devices */ - +extern int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg); int blk_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg) { request_queue_t *q; @@ -204,6 +204,10 @@ if (!dev) return -EINVAL; + + intval = block_ioctl(dev, cmd, arg); + if (intval != -ENOTTY) + return intval; switch (cmd) { case BLKROSET: diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/block_ioctl.c linux/drivers/block/block_ioctl.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/block_ioctl.c Wed Dec 31 19:00:00 1969 +++ linux/drivers/block/block_ioctl.c Thu Dec 6 03:43:51 2001 @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +int blk_do_rq(request_queue_t *q, struct request *rq) +{ + DECLARE_COMPLETION(wait); + int err = 0; + + rq->flags |= REQ_BARRIER; + rq->waiting = &wait; + elv_add_request(q, rq); + generic_unplug_device(q); + wait_for_completion(&wait); + + /* + * for now, never retry anything + */ + if (rq->errors) + err = -EIO; + + return err; +} + +int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg) +{ + request_queue_t *q; + struct request *rq; + int close = 0, err; + + q = blk_get_queue(dev); + if (!q) + return -ENXIO; + + switch (cmd) { + case CDROMCLOSETRAY: + close = 1; + case CDROMEJECT: + rq = blk_get_request(q, WRITE, __GFP_WAIT); + rq->flags = REQ_BLOCK_PC; + memset(rq->cdb, 0, sizeof(rq->cdb)); + rq->cdb[0] = GPCMD_START_STOP_UNIT; + rq->cdb[4] = 0x02 + (close != 0); + err = blk_do_rq(q, rq); + blk_put_request(rq); + break; + default: + err = -ENOTTY; + } + + return err; +} diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/cciss.c linux/drivers/block/cciss.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/cciss.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/cciss.c Tue Dec 4 14:16:36 2001 @@ -1252,9 +1252,9 @@ c->Request.Type.Type = TYPE_CMD; // It is a command. c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = - (creq->cmd == READ) ? XFER_READ: XFER_WRITE; + (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE; c->Request.Timeout = 0; // Don't time out - c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE; + c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? 
CCISS_READ : CCISS_WRITE; start_blk = creq->sector; #ifdef CCISS_DEBUG printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector, @@ -1869,7 +1869,7 @@ blk_init_queue(q, do_cciss_request); blk_queue_headactive(q, 0); blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); - q->max_segments = MAXSGENTRIES; + blk_queue_max_segments(q, MAXSGENTRIES); blk_queue_max_sectors(q, 512); /* fill in the other Kernel structs */ diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/cpqarray.c linux/drivers/block/cpqarray.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/cpqarray.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/cpqarray.c Tue Dec 4 14:05:32 2001 @@ -470,7 +470,7 @@ blk_init_queue(q, do_ida_request); blk_queue_headactive(q, 0); blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask); - q->max_segments = SG_MAX; + blk_queue_max_segments(q, SG_MAX); blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256); read_ahead[MAJOR_NR+i] = READ_AHEAD; @@ -898,7 +898,7 @@ seg = blk_rq_map_sg(q, creq, tmp_sg); /* Now do all the DMA Mappings */ - if (creq->cmd == READ) + if (rq_data_dir(creq) == READ) dir = PCI_DMA_FROMDEVICE; else dir = PCI_DMA_TODEVICE; @@ -913,7 +913,7 @@ DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); ); c->req.hdr.sg_cnt = seg; c->req.hdr.blk_cnt = creq->nr_sectors; - c->req.hdr.cmd = (creq->cmd == READ) ? IDA_READ : IDA_WRITE; + c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? 
IDA_READ : IDA_WRITE; c->type = CMD_RWREQ; spin_lock_irq(&q->queue_lock); diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/elevator.c linux/drivers/block/elevator.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/elevator.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/elevator.c Tue Dec 4 14:28:20 2001 @@ -58,7 +58,7 @@ next_rq = list_entry(next, struct request, queuelist); - BUG_ON(!next_rq->inactive); + BUG_ON(next_rq->flags & REQ_STARTED); /* * if the device is different (not a normal case) just check if @@ -93,14 +93,21 @@ /* * can we safely merge with this request? */ -inline int elv_rq_merge_ok(request_queue_t *q, struct request *rq, - struct bio *bio) +inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) { - if (bio_data_dir(bio) == rq->cmd) { - if (rq->rq_dev == bio->bi_dev && !rq->waiting - && !rq->special && rq->inactive) - return 1; - } + /* + * different data direction or already started, don't merge + */ + if (bio_data_dir(bio) != rq_data_dir(rq)) + return 0; + if (rq->flags & REQ_NOMERGE) + return 0; + + /* + * same device and no special stuff set, merge is ok + */ + if (rq->rq_dev == bio->bi_dev && !rq->waiting && !rq->special) + return 1; return 0; } @@ -124,14 +131,12 @@ */ if (__rq->elevator_sequence-- <= 0) break; - - if (unlikely(__rq->waiting || __rq->special)) - continue; - if (unlikely(!__rq->inactive)) + if (__rq->flags & (REQ_BARRIER | REQ_STARTED)) break; + if (!*req && bio_rq_in_between(bio, __rq, &q->queue_head)) *req = __rq; - if (!elv_rq_merge_ok(q, __rq, bio)) + if (!elv_rq_merge_ok(__rq, bio)) continue; if (__rq->elevator_sequence < count) @@ -218,11 +223,10 @@ prefetch(list_entry_rq(entry->prev)); - if (unlikely(__rq->waiting || __rq->special)) - continue; - if (unlikely(!__rq->inactive)) + if (__rq->flags & (REQ_BARRIER | REQ_STARTED)) break; - if (!elv_rq_merge_ok(q, __rq, bio)) + + if (!elv_rq_merge_ok(__rq, bio)) continue; /* diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/floppy.c 
linux/drivers/block/floppy.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/floppy.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/floppy.c Mon Dec 3 12:33:21 2001 @@ -2319,7 +2319,7 @@ DPRINT("request list destroyed in floppy request done\n"); } else { - if (CURRENT->cmd == WRITE) { + if (rq_data_dir(CURRENT) == WRITE) { /* record write error information */ DRWE->write_errors++; if (DRWE->write_errors == 1) { @@ -2621,10 +2621,10 @@ raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; - if (CURRENT->cmd == READ){ + if (rq_data_dir(CURRENT) == READ) { raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy,FD_READ); - } else if (CURRENT->cmd == WRITE){ + } else if (rq_data_dir(CURRENT) == WRITE){ raw_cmd->flags |= FD_RAW_WRITE; COMMAND = FM_MODE(_floppy,FD_WRITE); } else { @@ -2974,7 +2974,7 @@ if (usage_count == 0) { printk("warning: usage count=0, CURRENT=%p exiting\n", CURRENT); - printk("sect=%ld cmd=%d\n", CURRENT->sector, CURRENT->cmd); + printk("sect=%ld flags=%lx\n", CURRENT->sector, CURRENT->flags); return; } if (fdc_busy){ diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/ida_cmd.h linux/drivers/block/ida_cmd.h --- /opt/kernel/linux-2.5.1-pre5/drivers/block/ida_cmd.h Fri Nov 2 20:45:42 2001 +++ linux/drivers/block/ida_cmd.h Thu Dec 6 07:54:47 2001 @@ -67,7 +67,7 @@ __u8 reserved; } rhdr_t; -#define SG_MAX 32 +#define SG_MAX 31 typedef struct { rhdr_t hdr; sg_t sg[SG_MAX]; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/ll_rw_blk.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/ll_rw_blk.c Thu Dec 6 08:06:21 2001 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -253,6 +254,68 @@ q->seg_boundary_mask = mask; } +static char *rq_flags[] = { "REQ_RW", "REQ_RW_AHEAD", "REQ_BARRIER", + "REQ_CMD", "REQ_NOMERGE", "REQ_STARTED", + "REQ_DONTPREP", 
"REQ_DRIVE_CMD", "REQ_DRIVE_TASK", + "REQ_PC", "REQ_SENSE", "REQ_SPECIAL" }; + +void blk_dump_rq_flags(struct request *rq, char *msg) +{ + int bit; + + printk("%s: dev %x: ", msg, rq->rq_dev); + bit = 0; + do { + if (rq->flags & (1 << bit)) + printk("%s ", rq_flags[bit]); + bit++; + } while (bit < __REQ_NR_BITS); + + if (rq->flags & REQ_CMD) + printk("sector %lu, nr/cnr %lu/%u\n", rq->sector, + rq->nr_sectors, + rq->current_nr_sectors); + + printk("\n"); +} + +/* + * standard prep_rq_fn that builds 10 byte cdbs + */ +static int ll_10byte_cdb_build(request_queue_t *q, struct request *rq) +{ + int hard_sect = get_hardsect_size(rq->rq_dev); + sector_t block = rq->hard_sector / (hard_sect >> 9); + unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); + + if (!(rq->flags & REQ_CMD)) + return 0; + + if (rq_data_dir(rq) == READ) + rq->cdb[0] = READ_10; + else + rq->cdb[0] = WRITE_10; + + rq->cdb[1] = 0; + + /* + * fill in lba + */ + rq->cdb[2] = (block >> 24) & 0xff; + rq->cdb[3] = (block >> 16) & 0xff; + rq->cdb[4] = (block >> 8) & 0xff; + rq->cdb[5] = block & 0xff; + rq->cdb[6] = 0; + + /* + * and transfer length + */ + rq->cdb[7] = (blocks >> 8) & 0xff; + rq->cdb[8] = blocks & 0xff; + + return 0; +} + /* * can we merge the two segments, or do we need to start a new one? 
*/ @@ -284,7 +347,7 @@ unsigned long long lastend; struct bio_vec *bvec; struct bio *bio; - int nsegs, i, cluster; + int nsegs, i, cluster, j; nsegs = 0; bio = rq->bio; @@ -294,7 +357,9 @@ /* * for each bio in rq */ + j = 0; rq_for_each_bio(bio, rq) { + j++; /* * for each segment in bio */ @@ -319,8 +384,9 @@ sg[nsegs - 1].length += nbytes; } else { new_segment: - if (nsegs >= q->max_segments) { + if (nsegs > q->max_segments) { printk("map: %d >= %d\n", nsegs, q->max_segments); + printk("map %d, %d, bio_sectors %d, vcnt %d\n", i, j, bio_sectors(bio), bio->bi_vcnt); BUG(); } @@ -342,10 +408,11 @@ * the standard queue merge functions, can be overridden with device * specific ones if so desired */ -static inline int ll_new_segment(request_queue_t *q, struct request *req) +static inline int ll_new_segment(request_queue_t *q, struct request *req, + struct bio *bio) { - if (req->nr_segments < q->max_segments) { - req->nr_segments++; + if ((req->nr_segments + bio->bi_vcnt) < q->max_segments) { + req->nr_segments += bio->bi_vcnt; return 1; } return 0; @@ -359,7 +426,7 @@ if (blk_same_segment(q, req->biotail, bio)) return 1; - return ll_new_segment(q, req); + return ll_new_segment(q, req, bio); } static int ll_front_merge_fn(request_queue_t *q, struct request *req, @@ -370,7 +437,7 @@ if (blk_same_segment(q, bio, req->bio)) return 1; - return ll_new_segment(q, req); + return ll_new_segment(q, req, bio); } static int ll_merge_requests_fn(request_queue_t *q, struct request *req, @@ -396,7 +463,7 @@ * This is called with interrupts off and no requests on the queue. 
* (and with the request spinlock acquired) */ -static void blk_plug_device(request_queue_t *q) +void blk_plug_device(request_queue_t *q) { /* * common case @@ -573,6 +640,7 @@ q->back_merge_fn = ll_back_merge_fn; q->front_merge_fn = ll_front_merge_fn; q->merge_requests_fn = ll_merge_requests_fn; + q->prep_rq_fn = ll_10byte_cdb_build; q->plug_tq.sync = 0; q->plug_tq.routine = &generic_unplug_device; q->plug_tq.data = q; @@ -604,10 +672,11 @@ rq = blkdev_free_rq(&rl->free); list_del(&rq->queuelist); rl->count--; - rq->inactive = 1; + rq->flags = 0; rq->rq_status = RQ_ACTIVE; rq->special = NULL; rq->q = q; + rq->rl = rl; } return rq; @@ -638,6 +707,25 @@ return rq; } +struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) +{ + struct request *rq; + + BUG_ON(rw != READ && rw != WRITE); + + rq = get_request(q, rw); + + if (!rq && (gfp_mask & __GFP_WAIT)) + rq = get_request_wait(q, rw); + + return rq; +} + +void blk_put_request(struct request *rq) +{ + blkdev_release_request(rq); +} + /* RO fail safe mechanism */ static long ro_bits[MAX_BLKDEV][8]; @@ -663,12 +751,13 @@ else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31)); } -void drive_stat_acct (kdev_t dev, int rw, unsigned long nr_sectors, int new_io) +void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) { - unsigned int major = MAJOR(dev); + unsigned int major = MAJOR(rq->rq_dev); + int rw = rq_data_dir(rq); unsigned int index; - index = disk_index(dev); + index = disk_index(rq->rq_dev); if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR)) return; @@ -691,13 +780,15 @@ static inline void add_request(request_queue_t * q, struct request * req, struct list_head *insert_here) { - drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1); + drive_stat_acct(req, req->nr_sectors, 1); - { + /* + * debug stuff... 
+ */ + if (insert_here == &q->queue_head) { struct request *__rq = __elv_next_request(q); - if (__rq && !__rq->inactive && insert_here == &q->queue_head) - BUG(); + BUG_ON(__rq && (__rq->flags & REQ_STARTED)); } /* @@ -712,21 +803,21 @@ */ void blkdev_release_request(struct request *req) { - request_queue_t *q = req->q; - int rw = req->cmd; + struct request_list *rl = req->rl; req->rq_status = RQ_INACTIVE; req->q = NULL; + req->rl = NULL; /* * Request may not have originated from ll_rw_blk. if not, - * assume it has free buffers and check waiters + * it didn't come out of our reserved rq pools */ - if (q) { - list_add(&req->queuelist, &q->rq[rw].free); - if (++q->rq[rw].count >= batch_requests - && waitqueue_active(&q->rq[rw].wait)) - wake_up(&q->rq[rw].wait); + if (rl) { + list_add(&req->queuelist, &rl->free); + + if (++rl->count >= batch_requests &&waitqueue_active(&rl->wait)) + wake_up(&rl->wait); } } @@ -737,13 +828,28 @@ { struct request *next = blkdev_next_request(req); + /* + * not a rw command + */ + if (!(next->flags & REQ_CMD)) + return; + + /* + * not contigious + */ if (req->sector + req->nr_sectors != next->sector) return; - if (req->cmd != next->cmd + /* + * don't touch NOMERGE rq, or one that has been started by driver + */ + if (next->flags & (REQ_NOMERGE | REQ_STARTED)) + return; + + if (rq_data_dir(req) != rq_data_dir(next) || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > q->max_sectors - || next->waiting || next->special || !next->inactive) + || next->waiting || next->special) return; /* @@ -760,8 +866,6 @@ req->biotail->bi_next = next->bio; req->biotail = next->biotail; - next->bio = next->biotail = NULL; - req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; blkdev_release_request(next); @@ -835,8 +939,7 @@ spin_lock_prefetch(&q->queue_lock); latency = elevator_request_latency(elevator, rw); - - barrier = test_bit(BIO_BARRIER, &bio->bi_flags); + barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw); again: req = 
NULL; @@ -844,35 +947,22 @@ spin_lock_irq(&q->queue_lock); - /* - * barrier write must not be passed - so insert with 0 latency at - * the back of the queue and invalidate the entire existing merge hash - * for this device - */ - if (barrier && !freereq) - latency = 0; - insert_here = head->prev; if (blk_queue_empty(q) || barrier) { blk_plug_device(q); goto get_rq; -#if 0 - } else if (test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { - head = head->next; -#else } else if ((req = __elv_next_request(q))) { - if (!req->inactive) + if (req->flags & REQ_STARTED) head = head->next; req = NULL; -#endif } el_ret = elevator->elevator_merge_fn(q, &req, head, bio); switch (el_ret) { case ELEVATOR_BACK_MERGE: - if (&req->queuelist == head && !req->inactive) - BUG(); + BUG_ON(req->flags & REQ_STARTED); + BUG_ON(req->flags & REQ_NOMERGE); if (!q->back_merge_fn(q, req, bio)) break; elevator->elevator_merge_cleanup_fn(q, req, nr_sectors); @@ -880,13 +970,13 @@ req->biotail->bi_next = bio; req->biotail = bio; req->nr_sectors = req->hard_nr_sectors += nr_sectors; - drive_stat_acct(req->rq_dev, req->cmd, nr_sectors, 0); + drive_stat_acct(req, nr_sectors, 0); attempt_back_merge(q, req); goto out; case ELEVATOR_FRONT_MERGE: - if (&req->queuelist == head && !req->inactive) - BUG(); + BUG_ON(req->flags & REQ_STARTED); + BUG_ON(req->flags & REQ_NOMERGE); if (!q->front_merge_fn(q, req, bio)) break; elevator->elevator_merge_cleanup_fn(q, req, nr_sectors); @@ -903,7 +993,7 @@ req->hard_cur_sectors = cur_nr_sectors; req->sector = req->hard_sector = sector; req->nr_sectors = req->hard_nr_sectors += nr_sectors; - drive_stat_acct(req->rq_dev, req->cmd, nr_sectors, 0); + drive_stat_acct(req, nr_sectors, 0); attempt_front_merge(q, head, req); goto out; @@ -941,7 +1031,7 @@ /* * READA bit set */ - if (bio->bi_rw & RWA_MASK) { + if (bio->bi_rw & (1 << BIO_RW_AHEAD)) { set_bit(BIO_RW_BLOCK, &bio->bi_flags); goto end_io; } @@ -954,7 +1044,19 @@ * fill up the request-info, and add it to the queue */ 
req->elevator_sequence = latency; - req->cmd = rw; + + /* + * first three bits are identical in rq->flags and bio->bi_rw, + * see bio.h and blkdev.h + */ + req->flags = (bio->bi_rw & 7) | REQ_CMD; + + /* + * REQ_BARRIER implies no merging, but lets make it explicit + */ + if (barrier) + req->flags |= (REQ_BARRIER | REQ_NOMERGE); + req->errors = 0; req->hard_sector = req->sector = sector; req->hard_nr_sectors = req->nr_sectors = nr_sectors; @@ -967,11 +1069,8 @@ req->rq_dev = bio->bi_dev; add_request(q, req, insert_here); out: - if (freereq) { - freereq->bio = freereq->biotail = NULL; + if (freereq) blkdev_release_request(freereq); - } - spin_unlock_irq(&q->queue_lock); return 0; @@ -1083,10 +1182,11 @@ } /* - * uh oh, need to split this bio... not implemented yet + * this needs to be handled by q->make_request_fn, to just + * setup a part of the bio in the request to enable easy + * multiple passing */ - if (bio_sectors(bio) > q->max_sectors) - BUG(); + BUG_ON(bio_sectors(bio) > q->max_sectors); /* * If this device has partitions, remap block n @@ -1108,7 +1208,6 @@ bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); bio_put(bio); - return 0; } @@ -1312,7 +1411,7 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors) { struct bio *bio, *nxt; - int nsect; + int nsect, total_nsect = 0; req->errors = 0; if (!uptodate) @@ -1324,13 +1423,16 @@ nsect = bio_iovec(bio)->bv_len >> 9; nr_sectors -= nsect; + total_nsect += nsect; - nxt = bio->bi_next; - bio->bi_next = NULL; - if (!bio_endio(bio, uptodate, nsect)) - req->bio = nxt; - else - bio->bi_next = nxt; + if (++bio->bi_idx >= bio->bi_vcnt) { + nxt = bio->bi_next; + if (!bio_endio(bio, uptodate, total_nsect)) { + total_nsect = 0; + req->bio = nxt; + } else + BUG(); + } if ((bio = req->bio) != NULL) { req->hard_sector += nsect; @@ -1437,3 +1539,4 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); EXPORT_SYMBOL(blk_rq_map_sg); EXPORT_SYMBOL(blk_nohighio); +EXPORT_SYMBOL(blk_dump_rq_flags); diff -urN 
-X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/nbd.c linux/drivers/block/nbd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/nbd.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/nbd.c Thu Dec 6 05:50:38 2001 @@ -149,30 +149,41 @@ void nbd_send_req(struct socket *sock, struct request *req) { - int result; + int result, rw, i, flags; struct nbd_request request; unsigned long size = req->nr_sectors << 9; DEBUG("NBD: sending control, "); request.magic = htonl(NBD_REQUEST_MAGIC); - request.type = htonl(req->cmd); + request.type = htonl(req->flags); request.from = cpu_to_be64( (u64) req->sector << 9); request.len = htonl(size); memcpy(request.handle, &req, sizeof(req)); - result = nbd_xmit(1, sock, (char *) &request, sizeof(request), req->cmd == WRITE ? MSG_MORE : 0); + rw = rq_data_dir(req); + + result = nbd_xmit(1, sock, (char *) &request, sizeof(request), rw & WRITE ? MSG_MORE : 0); if (result <= 0) FAIL("Sendmsg failed for control."); - if (req->cmd == WRITE) { - struct bio *bio = req->bio; - DEBUG("data, "); - do { - result = nbd_xmit(1, sock, bio_data(bio), bio->bi_size, bio->bi_next == NULL ? 0 : MSG_MORE); - if (result <= 0) - FAIL("Send data failed."); - bio = bio->bi_next; - } while(bio); + if (rw & WRITE) { + struct bio *bio; + /* + * we are really probing at internals to determine + * whether to set MSG_MORE or not... 
+ */ + rq_for_each_bio(bio, req) { + struct bio_vec *bvec; + bio_for_each_segment(bvec, bio, i) { + flags = 0; + if ((i < (bio->bi_vcnt - 1)) || bio->bi_next) + flags = MSG_MORE; + DEBUG("data, "); + result = nbd_xmit(1, sock, page_address(bvec->bv_page) + bvec->bv_offset, bvec->bv_len, flags); + if (result <= 0) + FAIL("Send data failed."); + } + } } return; @@ -204,7 +215,7 @@ HARDFAIL("Not enough magic."); if (ntohl(reply.error)) FAIL("Other side returned error."); - if (req->cmd == READ) { + if (rq_data_dir(req) == READ) { struct bio *bio = req->bio; DEBUG("data, "); do { @@ -321,10 +332,13 @@ if (dev >= MAX_NBD) FAIL("Minor too big."); /* Probably can not happen */ #endif + if (!(req->flags & REQ_CMD)) + goto error_out; + lo = &nbd_dev[dev]; if (!lo->file) FAIL("Request when not-ready."); - if ((req->cmd == WRITE) && (lo->flags & NBD_READ_ONLY)) + if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_READ_ONLY)) FAIL("Write on read-only"); #ifdef PARANOIA if (lo->magic != LO_MAGIC) @@ -374,7 +388,7 @@ switch (cmd) { case NBD_DISCONNECT: printk("NBD_DISCONNECT\n") ; - sreq.cmd=2 ; /* shutdown command */ + sreq.flags = REQ_SPECIAL; /* FIXME: interpet as shutdown cmd */ if (!lo->sock) return -EINVAL ; nbd_send_req(lo->sock,&sreq) ; return 0 ; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pcd.c linux/drivers/block/paride/pcd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pcd.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/paride/pcd.c Mon Dec 3 12:56:25 2001 @@ -768,7 +768,7 @@ while (1) { if (QUEUE_EMPTY || (CURRENT->rq_status == RQ_INACTIVE)) return; INIT_REQUEST; - if (CURRENT->cmd == READ) { + if (rq_data_dir(CURRENT) == READ) { unit = MINOR(CURRENT->rq_dev); if (unit != pcd_unit) { pcd_bufblk = -1; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pd.c linux/drivers/block/paride/pd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pd.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/paride/pd.c Mon 
Dec 3 12:56:07 2001 @@ -858,7 +858,7 @@ goto repeat; } - pd_cmd = CURRENT->cmd; + pd_cmd = rq_data_dir(CURRENT); pd_buf = CURRENT->buffer; pd_retries = 0; @@ -884,7 +884,7 @@ /* paranoia */ if (QUEUE_EMPTY || - (CURRENT->cmd != pd_cmd) || + (rq_data_dir(CURRENT) != pd_cmd) || (MINOR(CURRENT->rq_dev) != pd_dev) || (CURRENT->rq_status == RQ_INACTIVE) || (CURRENT->sector != pd_block)) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pf.c linux/drivers/block/paride/pf.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/paride/pf.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/paride/pf.c Mon Dec 3 12:56:54 2001 @@ -859,7 +859,7 @@ goto repeat; } - pf_cmd = CURRENT->cmd; + pf_cmd = rq_data_dir(CURRENT); pf_buf = CURRENT->buffer; pf_retries = 0; @@ -885,7 +885,7 @@ /* paranoia */ if (QUEUE_EMPTY || - (CURRENT->cmd != pf_cmd) || + (rq_data_dir(CURRENT) != pf_cmd) || (DEVICE_NR(CURRENT->rq_dev) != pf_unit) || (CURRENT->rq_status == RQ_INACTIVE) || (CURRENT->sector != pf_block)) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/block/rd.c linux/drivers/block/rd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/block/rd.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/block/rd.c Tue Dec 4 08:44:18 2001 @@ -485,7 +485,6 @@ ioctl: rd_ioctl, }; -#ifdef MODULE /* Before freeing the module, invalidate all of the protected buffers! 
*/ static void __exit rd_cleanup (void) { @@ -503,7 +502,6 @@ unregister_blkdev( MAJOR_NR, "ramdisk" ); blk_clear(MAJOR_NR); } -#endif /* This is the registration and initialization section of the RAM disk driver */ int __init rd_init (void) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c --- /opt/kernel/linux-2.5.1-pre5/drivers/cdrom/cdrom.c Tue Dec 4 04:41:56 2001 +++ linux/drivers/cdrom/cdrom.c Thu Dec 6 08:01:56 2001 @@ -247,7 +247,7 @@ /* Define this to remove _all_ the debugging messages */ /* #define ERRLOGMASK CD_NOTHING */ -#define ERRLOGMASK (CD_WARNING) +#define ERRLOGMASK CD_WARNING /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */ /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */ diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-cd.c Tue Dec 4 04:41:57 2001 +++ linux/drivers/ide/ide-cd.c Thu Dec 6 07:57:35 2001 @@ -533,8 +533,8 @@ /* stuff the sense request in front of our current request */ rq = &info->request_sense_request; ide_init_drive_cmd(rq); - rq->cmd = REQUEST_SENSE_COMMAND; - rq->buffer = (char *) pc; + rq->flags = REQ_SENSE; + rq->special = (char *) pc; rq->waiting = wait; (void) ide_do_drive_cmd(drive, rq, ide_preempt); } @@ -544,17 +544,17 @@ { struct request *rq = HWGROUP(drive)->rq; - if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate) { - struct packet_command *pc = (struct packet_command *) rq->buffer; + if ((rq->flags & REQ_SENSE) && uptodate) { + struct packet_command *pc = (struct packet_command *) rq->special; cdrom_analyze_sense_data(drive, (struct packet_command *) pc->sense, (struct request_sense *) (pc->buffer - pc->c[4])); } - if (rq->cmd == READ || rq->cmd == WRITE) - if (!rq->current_nr_sectors) - uptodate = 1; - ide_end_request (uptodate, HWGROUP(drive)); + if ((rq->flags & REQ_CMD) && !rq->current_nr_sectors) + 
uptodate = 1; + + ide_end_request(uptodate, HWGROUP(drive)); } @@ -584,21 +584,20 @@ return 1; } - if (rq->cmd == REQUEST_SENSE_COMMAND) { + if (rq->flags & REQ_SENSE) { /* We got an error trying to get sense info from the drive (probably while trying to recover from a former error). Just give up. */ - pc = (struct packet_command *) rq->buffer; + pc = (struct packet_command *) rq->special; pc->stat = 1; cdrom_end_request (1, drive); *startstop = ide_error (drive, "request sense failure", stat); return 1; - - } else if (rq->cmd == PACKET_COMMAND) { + } else if (rq->flags & REQ_PC) { /* All other functions, except for READ. */ struct completion *wait = NULL; - pc = (struct packet_command *) rq->buffer; + pc = (struct packet_command *) rq->special; /* Check for tray open. */ if (sense_key == NOT_READY) { @@ -632,7 +631,7 @@ if ((stat & ERR_STAT) != 0) cdrom_queue_request_sense(drive, wait, pc->sense, pc); - } else { + } else if (rq->flags & REQ_CMD) { /* Handle errors from READ and WRITE requests. */ if (sense_key == NOT_READY) { @@ -671,7 +670,8 @@ queue a request sense command. */ if ((stat & ERR_STAT) != 0) cdrom_queue_request_sense(drive, NULL, NULL, NULL); - } + } else + blk_dump_rq_flags(rq, "ide-cd bad flags"); /* Retry, or handle the next request. */ *startstop = ide_stopped; @@ -681,7 +681,6 @@ static int cdrom_timer_expiry(ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; - struct packet_command *pc = (struct packet_command *) rq->buffer; unsigned long wait = 0; /* @@ -690,7 +689,7 @@ * this, but not all commands/drives support that. Let * ide_timer_expiry keep polling us for these. 
*/ - switch (pc->c[0]) { + switch (rq->cdb[0]) { case GPCMD_BLANK: case GPCMD_FORMAT_UNIT: case GPCMD_RESERVE_RZONE_TRACK: @@ -700,6 +699,7 @@ wait = 0; break; } + return wait; } @@ -1116,11 +1116,7 @@ (65534 / CD_FRAMESIZE) : 65535); /* Set up the command */ - memset (&pc.c, 0, sizeof (pc.c)); - pc.c[0] = GPCMD_READ_10; - pc.c[7] = (nframes >> 8); - pc.c[8] = (nframes & 0xff); - put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]); + memcpy(pc.c, rq->cdb, sizeof(pc.c)); pc.timeout = WAIT_CMD; /* Send the command to the drive and return. */ @@ -1170,7 +1166,7 @@ sector -= nskip; frame = sector / SECTORS_PER_FRAME; - memset (&pc.c, 0, sizeof (pc.c)); + memset(rq->cdb, 0, sizeof(rq->cdb)); pc.c[0] = GPCMD_SEEK; put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]); @@ -1188,8 +1184,11 @@ return cdrom_start_packet_command (drive, 0, cdrom_start_seek_continuation); } -/* Fix up a possibly partially-processed request so that we can - start it over entirely */ +/* + * Fix up a possibly partially-processed request so that we can + * start it over entirely -- remember to call prep_rq_fn again since we + * may have changed the layout + */ static void restore_request (struct request *rq) { if (rq->buffer != bio_data(rq->bio)) { @@ -1201,6 +1200,7 @@ rq->hard_cur_sectors = rq->current_nr_sectors = bio_sectors(rq->bio); rq->hard_nr_sectors = rq->nr_sectors; rq->hard_sector = rq->sector; + rq->q->prep_rq_fn(rq->q, rq); } /* @@ -1210,18 +1210,7 @@ { struct cdrom_info *info = drive->driver_data; struct request *rq = HWGROUP(drive)->rq; - int minor = MINOR (rq->rq_dev); - - /* If the request is relative to a partition, fix it up to refer to the - absolute address. */ - if (minor & PARTN_MASK) { - rq->sector = block; - minor &= ~PARTN_MASK; - rq->rq_dev = MKDEV(MAJOR(rq->rq_dev), minor); - } - /* We may be retrying this request after an error. Fix up - any weirdness which might be present in the request packet. 
*/ restore_request(rq); /* Satisfy whatever we can of this request from our cached sector. */ @@ -1258,7 +1247,7 @@ { int ireason, len, stat, thislen; struct request *rq = HWGROUP(drive)->rq; - struct packet_command *pc = (struct packet_command *)rq->buffer; + struct packet_command *pc = (struct packet_command *) rq->special; ide_startstop_t startstop; /* Check for errors. */ @@ -1354,7 +1343,7 @@ static ide_startstop_t cdrom_do_pc_continuation (ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; - struct packet_command *pc = (struct packet_command *)rq->buffer; + struct packet_command *pc = (struct packet_command *) rq->special; if (!pc->timeout) pc->timeout = WAIT_CMD; @@ -1368,7 +1357,7 @@ { int len; struct request *rq = HWGROUP(drive)->rq; - struct packet_command *pc = (struct packet_command *)rq->buffer; + struct packet_command *pc = (struct packet_command *) rq->special; struct cdrom_info *info = drive->driver_data; info->dma = 0; @@ -1407,8 +1396,8 @@ /* Start of retry loop. */ do { ide_init_drive_cmd (&req); - req.cmd = PACKET_COMMAND; - req.buffer = (char *)pc; + req.flags = REQ_PC; + req.special = (char *) pc; if (ide_do_drive_cmd (drive, &req, ide_wait)) { printk("%s: do_drive_cmd returned stat=%02x,err=%02x\n", drive->name, req.buffer[0], req.buffer[1]); @@ -1583,18 +1572,10 @@ nframes = rq->nr_sectors >> 2; frame = rq->sector >> 2; - memset(&pc.c, 0, sizeof(pc.c)); - /* - * we might as well use WRITE_12, but none of the device I have - * support the streaming feature anyway, so who cares. 
- */ - pc.c[0] = GPCMD_WRITE_10; + memcpy(pc.c, rq->cdb, sizeof(pc.c)); #if 0 /* the immediate bit */ pc.c[1] = 1 << 3; #endif - pc.c[7] = (nframes >> 8) & 0xff; - pc.c[8] = nframes & 0xff; - put_unaligned(cpu_to_be32(frame), (unsigned int *)&pc.c[2]); pc.timeout = 2 * WAIT_CMD; return cdrom_transfer_packet_command(drive, &pc, cdrom_write_intr); @@ -1631,6 +1612,28 @@ return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont); } +/* + * just wrap this around cdrom_do_packet_command + */ +static int cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) +{ + struct packet_command pc; + ide_startstop_t startstop; + + memset(&pc, 0, sizeof(pc)); + memcpy(pc.c, rq->cdb, sizeof(pc.c)); + pc.quiet = 1; + pc.timeout = 60 * HZ; + rq->special = (char *) &pc; + + startstop = cdrom_do_packet_command(drive); + if (pc.stat) + rq->errors++; + + return startstop; +} + + /**************************************************************************** * cdrom driver request routine. */ @@ -1640,50 +1643,45 @@ ide_startstop_t action; struct cdrom_info *info = drive->driver_data; - switch (rq->cmd) { - case WRITE: - case READ: { - if (CDROM_CONFIG_FLAGS(drive)->seeking) { - unsigned long elpased = jiffies - info->start_seek; - int stat = GET_STAT(); - - if ((stat & SEEK_STAT) != SEEK_STAT) { - if (elpased < IDECD_SEEK_TIMEOUT) { - ide_stall_queue(drive, IDECD_SEEK_TIMER); - return ide_stopped; - } - printk ("%s: DSC timeout\n", drive->name); + if (rq->flags & REQ_CMD) { + if (CDROM_CONFIG_FLAGS(drive)->seeking) { + unsigned long elpased = jiffies - info->start_seek; + int stat = GET_STAT(); + + if ((stat & SEEK_STAT) != SEEK_STAT) { + if (elpased < IDECD_SEEK_TIMEOUT) { + ide_stall_queue(drive, IDECD_SEEK_TIMER); + return ide_stopped; } - CDROM_CONFIG_FLAGS(drive)->seeking = 0; + printk ("%s: DSC timeout\n", drive->name); } - if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) - action = cdrom_start_seek (drive, block); - else { - if 
(rq->cmd == READ) - action = cdrom_start_read(drive, block); - else - action = cdrom_start_write(drive, rq); - } - info->last_block = block; - return action; - } - - case PACKET_COMMAND: - case REQUEST_SENSE_COMMAND: { - return cdrom_do_packet_command(drive); - } - - case RESET_DRIVE_COMMAND: { - cdrom_end_request(1, drive); - return ide_do_reset(drive); + CDROM_CONFIG_FLAGS(drive)->seeking = 0; } - - default: { - printk("ide-cd: bad cmd %d\n", rq->cmd); - cdrom_end_request(0, drive); - return ide_stopped; + if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) + action = cdrom_start_seek (drive, block); + else { + if (rq_data_dir(rq) == READ) + action = cdrom_start_read(drive, block); + else + action = cdrom_start_write(drive, rq); } + info->last_block = block; + return action; + } else if (rq->flags & (REQ_PC | REQ_SENSE)) { + return cdrom_do_packet_command(drive); + } else if (rq->flags & REQ_SPECIAL) { + /* + * right now this can only be a reset... 
+ */ + cdrom_end_request(1, drive); + return ide_do_reset(drive); + } else if (rq->flags & REQ_BLOCK_PC) { + return cdrom_do_block_pc(drive, rq); } + + blk_dump_rq_flags(rq, "ide-cd bad flags"); + cdrom_end_request(0, drive); + return ide_stopped; } @@ -2151,6 +2149,7 @@ return cgc->stat; } + static int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg) @@ -2284,7 +2283,7 @@ int ret; ide_init_drive_cmd (&req); - req.cmd = RESET_DRIVE_COMMAND; + req.flags = REQ_SPECIAL; ret = ide_do_drive_cmd(drive, &req, ide_wait); /* @@ -2673,6 +2672,7 @@ */ set_device_ro(MKDEV(HWIF(drive)->major, minor), 1); set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE); + blk_queue_hardsect_size(&drive->queue, CD_FRAMESIZE); drive->special.all = 0; drive->ready_stat = 0; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-disk.c linux/drivers/ide/ide-disk.c --- /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-disk.c Tue Dec 4 04:41:57 2001 +++ linux/drivers/ide/ide-disk.c Tue Dec 4 05:32:10 2001 @@ -384,7 +384,7 @@ #endif /* CONFIG_BLK_DEV_PDC4030 */ #ifdef DEBUG printk("%s: %sing: LBAsect=%ld, sectors=%ld, buffer=0x%08lx\n", - drive->name, (rq->cmd==READ)?"read":"writ", + drive->name, (rq_data_dir(rq)==READ)?"read":"writ", block, rq->nr_sectors, (unsigned long) rq->buffer); #endif OUT_BYTE(block,IDE_SECTOR_REG); @@ -403,7 +403,7 @@ OUT_BYTE(head|drive->select.all,IDE_SELECT_REG); #ifdef DEBUG printk("%s: %sing: CHS=%d/%d/%d, sectors=%ld, buffer=0x%08lx\n", - drive->name, (rq->cmd==READ)?"read":"writ", cyl, + drive->name, (rq_data_dir(rq)==READ)?"read":"writ", cyl, head, sect, rq->nr_sectors, (unsigned long) rq->buffer); #endif } @@ -413,7 +413,7 @@ return do_pdc4030_io (drive, rq); } #endif /* CONFIG_BLK_DEV_PDC4030 */ - if (rq->cmd == READ) { + if (rq_data_dir(rq) == READ) { #ifdef CONFIG_BLK_DEV_IDEDMA if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_read, drive))) return ide_started; @@ -422,7 +422,7 @@ 
OUT_BYTE(drive->mult_count ? WIN_MULTREAD : WIN_READ, IDE_COMMAND_REG); return ide_started; } - if (rq->cmd == WRITE) { + if (rq_data_dir(rq) == WRITE) { ide_startstop_t startstop; #ifdef CONFIG_BLK_DEV_IDEDMA if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_write, drive))) @@ -464,7 +464,7 @@ } return ide_started; } - printk(KERN_ERR "%s: bad command: %d\n", drive->name, rq->cmd); + printk(KERN_ERR "%s: bad command: %lx\n", drive->name, rq->flags); ide_end_request(0, HWGROUP(drive)); return ide_stopped; } diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-dma.c linux/drivers/ide/ide-dma.c --- /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide-dma.c Tue Dec 4 04:41:57 2001 +++ linux/drivers/ide/ide-dma.c Mon Dec 3 12:38:11 2001 @@ -234,7 +234,7 @@ if (nents > rq->nr_segments) printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents); - if (rq->cmd == READ) + if (rq_data_dir(rq) == READ) hwif->sg_dma_direction = PCI_DMA_FROMDEVICE; else hwif->sg_dma_direction = PCI_DMA_TODEVICE; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide.c linux/drivers/ide/ide.c --- /opt/kernel/linux-2.5.1-pre5/drivers/ide/ide.c Tue Dec 4 04:41:57 2001 +++ linux/drivers/ide/ide.c Wed Dec 5 09:40:31 2001 @@ -151,6 +151,7 @@ #include #include #include +#include #include #include @@ -562,8 +563,7 @@ spin_lock_irqsave(&ide_lock, flags); rq = hwgroup->rq; - if (rq->inactive) - BUG(); + BUG_ON(!(rq->flags & REQ_STARTED)); /* * small hack to eliminate locking from ide_end_request to grab @@ -878,7 +878,7 @@ spin_lock_irqsave(&ide_lock, flags); rq = HWGROUP(drive)->rq; - if (rq->cmd == IDE_DRIVE_CMD) { + if (rq->flags & REQ_DRIVE_CMD) { byte *args = (byte *) rq->buffer; rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { @@ -886,7 +886,7 @@ args[1] = err; args[2] = IN_BYTE(IDE_NSECTOR_REG); } - } else if (rq->cmd == IDE_DRIVE_TASK) { + } else if (rq->flags & REQ_DRIVE_TASK) { byte *args = (byte *) rq->buffer; rq->errors = 
!OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { @@ -901,8 +901,6 @@ } spin_lock(DRIVE_LOCK(drive)); - if (rq->inactive) - BUG(); blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; end_that_request_last(rq); @@ -1010,7 +1008,7 @@ if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) return ide_stopped; /* retry only "normal" I/O: */ - if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) { + if (!(rq->flags & REQ_CMD)) { rq->errors = 1; ide_end_drive_cmd(drive, stat, err); return ide_stopped; @@ -1030,7 +1028,7 @@ else if (err & TRK0_ERR) /* help it find track zero */ rq->errors |= ERROR_RECAL; } - if ((stat & DRQ_STAT) && rq->cmd != WRITE) + if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ) try_to_flush_leftover_data(drive); } if (GET_STAT() & (BUSY_STAT|DRQ_STAT)) @@ -1177,7 +1175,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq) { byte *args = rq->buffer; - if (args && rq->cmd == IDE_DRIVE_TASK) { + if (args && (rq->flags & REQ_DRIVE_TASK)) { byte sel; #ifdef DEBUG printk("%s: DRIVE_TASK_CMD data=x%02x cmd=0x%02x fr=0x%02x ns=0x%02x sc=0x%02x lcyl=0x%02x hcyl=0x%02x sel=0x%02x\n", @@ -1234,8 +1232,7 @@ unsigned int minor = MINOR(rq->rq_dev), unit = minor >> PARTN_BITS; ide_hwif_t *hwif = HWIF(drive); - if (rq->inactive) - BUG(); + BUG_ON(!(rq->flags & REQ_STARTED)); #ifdef DEBUG printk("%s: start_request: current=0x%08lx\n", hwif->name, (unsigned long) rq); @@ -1259,7 +1256,7 @@ block = rq->sector; /* Strange disk manager remap */ - if ((rq->cmd == READ || rq->cmd == WRITE) && + if ((rq->flags & REQ_CMD) && (drive->media == ide_disk || drive->media == ide_floppy)) { block += drive->sect0; } @@ -1279,9 +1276,9 @@ return startstop; } if (!drive->special.all) { - if (rq->cmd == IDE_DRIVE_CMD || rq->cmd == IDE_DRIVE_TASK) { + if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) return execute_drive_cmd(drive, rq); - } + if (drive->driver != NULL) { return (DRIVER(drive)->do_request(drive, rq, block)); } @@ -1831,7 +1828,7 
@@ void ide_init_drive_cmd (struct request *rq) { memset(rq, 0, sizeof(*rq)); - rq->cmd = IDE_DRIVE_CMD; + rq->flags = REQ_DRIVE_CMD; } /* @@ -2612,7 +2609,7 @@ struct request rq; ide_init_drive_cmd(&rq); - rq.cmd = IDE_DRIVE_TASK; + rq.flags = REQ_DRIVE_TASK; rq.buffer = buf; return ide_do_drive_cmd(drive, &rq, ide_wait); } @@ -2835,6 +2832,13 @@ case BLKBSZGET: case BLKBSZSET: return blk_ioctl(inode->i_rdev, cmd, arg); + + /* + * uniform packet command handling + */ + case CDROMEJECT: + case CDROMCLOSETRAY: + return block_ioctl(inode->i_rdev, cmd, arg); case HDIO_GET_BUSSTATE: if (!capable(CAP_SYS_ADMIN)) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/scsi/ide-scsi.c linux/drivers/scsi/ide-scsi.c --- /opt/kernel/linux-2.5.1-pre5/drivers/scsi/ide-scsi.c Tue Dec 4 04:41:58 2001 +++ linux/drivers/scsi/ide-scsi.c Wed Dec 5 05:16:25 2001 @@ -267,7 +267,7 @@ u8 *scsi_buf; unsigned long flags; - if (rq->cmd != IDESCSI_PC_RQ) { + if (!(rq->flags & REQ_SPECIAL)) { ide_end_request (uptodate, hwgroup); return; } @@ -463,10 +463,10 @@ printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %ld\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); #endif /* IDESCSI_DEBUG_LOG */ - if (rq->cmd == IDESCSI_PC_RQ) { + if (rq->flags & REQ_SPECIAL) { return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->buffer); } - printk (KERN_ERR "ide-scsi: %s: unsupported command in request queue (%x)\n", drive->name, rq->cmd); + blk_dump_rq_flags(rq, "ide-scsi: unsup command"); idescsi_end_request (0,HWGROUP (drive)); return ide_stopped; } @@ -804,7 +804,7 @@ ide_init_drive_cmd (rq); rq->buffer = (char *) pc; rq->bio = idescsi_dma_bio (drive, pc); - rq->cmd = IDESCSI_PC_RQ; + rq->flags = REQ_SPECIAL; spin_unlock(&cmd->host->host_lock); (void) ide_do_drive_cmd (drive, rq, ide_end); spin_lock_irq(&cmd->host->host_lock); diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c --- 
/opt/kernel/linux-2.5.1-pre5/drivers/scsi/scsi_lib.c Tue Dec 4 04:41:59 2001 +++ linux/drivers/scsi/scsi_lib.c Wed Dec 5 10:48:09 2001 @@ -72,13 +72,12 @@ ASSERT_LOCK(&q->queue_lock, 0); - rq->cmd = SPECIAL; + rq->flags = REQ_SPECIAL | REQ_NOMERGE | REQ_BARRIER; rq->special = data; rq->q = NULL; rq->bio = rq->biotail = NULL; rq->nr_segments = 0; rq->elevator_sequence = 0; - rq->inactive = 0; /* * We have the option of inserting the head or the tail of the queue. @@ -262,6 +261,9 @@ * the bad sector. */ SCpnt->request.special = (void *) SCpnt; +#if 0 + SCpnt->request.flags |= REQ_SPECIAL; +#endif list_add(&SCpnt->request.queuelist, &q->queue_head); } @@ -544,7 +546,7 @@ if (bbpnt) { for (i = 0; i < SCpnt->use_sg; i++) { if (bbpnt[i]) { - if (SCpnt->request.cmd == READ) { + if (rq_data_dir(req) == READ) { memcpy(bbpnt[i], sgpnt[i].address, sgpnt[i].length); @@ -556,7 +558,7 @@ scsi_free(SCpnt->buffer, SCpnt->sglist_len); } else { if (SCpnt->buffer != req->buffer) { - if (req->cmd == READ) { + if (rq_data_dir(req) == READ) { unsigned long flags; char *to = bio_kmap_irq(req->bio, &flags); @@ -901,8 +903,7 @@ break; /* - * get next queueable request. cur_rq would be set if we - * previously had to abort for some reason + * get next queueable request. */ req = elv_next_request(q); @@ -916,7 +917,7 @@ * these two cases differently. We differentiate by looking * at request.cmd, as this tells us the real story. */ - if (req->cmd == SPECIAL) { + if (req->flags & REQ_SPECIAL) { STpnt = NULL; SCpnt = (Scsi_Cmnd *) req->special; SRpnt = (Scsi_Request *) req->special; @@ -929,7 +930,7 @@ scsi_init_cmd_from_req(SCpnt, SRpnt); } - } else { + } else if (req->flags & REQ_CMD) { SRpnt = NULL; STpnt = scsi_get_request_dev(req); if (!STpnt) { @@ -938,7 +939,7 @@ /* * Now try and find a command block that we can use. 
*/ - if( req->special != NULL ) { + if (req->special) { SCpnt = (Scsi_Cmnd *) req->special; /* * We need to recount the number of @@ -959,6 +960,9 @@ */ if (!SCpnt) break; + } else { + blk_dump_rq_flags(req, "SCSI bad req"); + break; } /* @@ -997,7 +1001,7 @@ req = NULL; spin_unlock_irq(&q->queue_lock); - if (SCpnt->request.cmd != SPECIAL) { + if (SCpnt->request.flags & REQ_CMD) { /* * This will do a couple of things: * 1) Fill in the actual SCSI command. @@ -1010,9 +1014,9 @@ * some kinds of consistency checking may cause the * request to be rejected immediately. */ - if (STpnt == NULL) { - STpnt = scsi_get_request_dev(req); - } + if (STpnt == NULL) + STpnt = scsi_get_request_dev(&SCpnt->request); + /* * This sets up the scatter-gather table (allocating if * required). Hosts that need bounce buffers will also diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c --- /opt/kernel/linux-2.5.1-pre5/drivers/scsi/scsi_merge.c Tue Dec 4 04:41:59 2001 +++ linux/drivers/scsi/scsi_merge.c Wed Dec 5 12:26:32 2001 @@ -225,7 +225,7 @@ static inline int scsi_new_segment(request_queue_t * q, struct request * req, - struct Scsi_Host *SHpnt) + struct bio *bio) { /* * pci_map_sg won't be able to map these two @@ -234,11 +234,11 @@ */ if (req->nr_hw_segments >= q->max_segments) return 0; - else if (req->nr_segments >= q->max_segments) + else if (req->nr_segments + bio->bi_vcnt > q->max_segments) return 0; - req->nr_hw_segments++; - req->nr_segments++; + req->nr_hw_segments += bio->bi_vcnt; + req->nr_segments += bio->bi_vcnt; return 1; } @@ -246,16 +246,16 @@ static inline int scsi_new_segment(request_queue_t * q, struct request * req, - struct Scsi_Host *SHpnt) + struct bio *bio) { - if (req->nr_segments >= q->max_segments) + if (req->nr_segments + bio->bi_vcnt > q->max_segments) return 0; /* * This will form the start of a new segment. Bump the * counter. 
*/ - req->nr_segments++; + req->nr_segments += bio->bi_vcnt; return 1; } #endif @@ -297,8 +297,6 @@ struct bio *bio, int dma_host) { - Scsi_Device *SDpnt = q->queuedata; - if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) return 0; else if (!BIO_SEG_BOUNDARY(q, req->biotail, bio)) @@ -306,9 +304,9 @@ #ifdef DMA_CHUNK_SIZE if (MERGEABLE_BUFFERS(req->biotail, bio)) - return scsi_new_mergeable(q, req, SDpnt->host); + return scsi_new_mergeable(q, req, q->queuedata); #endif - return scsi_new_segment(q, req, SDpnt->host); + return scsi_new_segment(q, req, bio); } __inline static int __scsi_front_merge_fn(request_queue_t * q, @@ -316,8 +314,6 @@ struct bio *bio, int dma_host) { - Scsi_Device *SDpnt = q->queuedata; - if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) return 0; else if (!BIO_SEG_BOUNDARY(q, bio, req->bio)) @@ -325,9 +321,9 @@ #ifdef DMA_CHUNK_SIZE if (MERGEABLE_BUFFERS(bio, req->bio)) - return scsi_new_mergeable(q, req, SDpnt->host); + return scsi_new_mergeable(q, req, q->queuedata); #endif - return scsi_new_segment(q, req, SDpnt->host); + return scsi_new_segment(q, req, bio); } /* @@ -686,10 +682,9 @@ } break; } - if (req->cmd == WRITE) { + if (rq_data_dir(req) == WRITE) memcpy(sgpnt[i].address, bbpnt[i], sgpnt[i].length); - } } } return 1; @@ -778,7 +773,7 @@ return 0; } } - if (req->cmd == WRITE) { + if (rq_data_dir(req) == WRITE) { unsigned long flags; char *buf = bio_kmap_irq(bio, &flags); memcpy(buff, buf, this_count << 9); diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/scsi/sd.c linux/drivers/scsi/sd.c --- /opt/kernel/linux-2.5.1-pre5/drivers/scsi/sd.c Tue Dec 4 04:41:59 2001 +++ linux/drivers/scsi/sd.c Mon Dec 3 12:51:24 2001 @@ -354,21 +354,17 @@ this_count = this_count >> 3; } } - switch (SCpnt->request.cmd) { - case WRITE: + if (rq_data_dir(&SCpnt->request) == WRITE) { if (!dpnt->device->writeable) { return 0; } SCpnt->cmnd[0] = WRITE_6; SCpnt->sc_data_direction = SCSI_DATA_WRITE; - break; - case READ: + } else if 
(rq_data_dir(&SCpnt->request) == READ) { SCpnt->cmnd[0] = READ_6; SCpnt->sc_data_direction = SCSI_DATA_READ; - break; - default: - panic("Unknown sd command %d\n", SCpnt->request.cmd); - } + } else + panic("Unknown sd command %lx\n", SCpnt->request.flags); SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", nbuff, diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/drivers/scsi/sr.c linux/drivers/scsi/sr.c --- /opt/kernel/linux-2.5.1-pre5/drivers/scsi/sr.c Tue Dec 4 04:41:59 2001 +++ linux/drivers/scsi/sr.c Thu Dec 6 07:59:48 2001 @@ -388,7 +388,12 @@ return 0; } - if ((SCpnt->request.cmd == WRITE) && !scsi_CDs[dev].device->writeable) + if (!(SCpnt->request.flags & REQ_CMD)) { + blk_dump_rq_flags(&SCpnt->request, "sr unsup command"); + return 0; + } + + if (rq_data_dir(&SCpnt->request) == WRITE && !scsi_CDs[dev].device->writeable) return 0; /* @@ -408,7 +413,18 @@ return 0; } - block = SCpnt->request.sector / (s_size >> 9); + if (rq_data_dir(&SCpnt->request) == WRITE) { + if (!scsi_CDs[dev].device->writeable) + return 0; + SCpnt->cmnd[0] = WRITE_10; + SCpnt->sc_data_direction = SCSI_DATA_WRITE; + } else if (rq_data_dir(&SCpnt->request) == READ) { + SCpnt->cmnd[0] = READ_10; + SCpnt->sc_data_direction = SCSI_DATA_READ; + } else { + blk_dump_rq_flags(&SCpnt->request, "Unknown sr command"); + return 0; + } /* * request doesn't start on hw block boundary, add scatter pads @@ -419,19 +435,6 @@ this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9); - switch (SCpnt->request.cmd) { - case WRITE: - SCpnt->cmnd[0] = WRITE_10; - SCpnt->sc_data_direction = SCSI_DATA_WRITE; - break; - case READ: - SCpnt->cmnd[0] = READ_10; - SCpnt->sc_data_direction = SCSI_DATA_READ; - break; - default: - printk("Unknown sr command %d\n", SCpnt->request.cmd); - return 0; - } SCSI_LOG_HLQUEUE(2, printk("sr%d : %s %d/%ld 512 byte blocks.\n", devm, @@ -440,6 +443,8 @@ SCpnt->cmnd[1] = (SCpnt->device->scsi_level <= SCSI_2) ? 
((SCpnt->lun << 5) & 0xe0) : 0; + + block = SCpnt->request.sector / (s_size >> 9); if (this_count > 0xffff) this_count = 0xffff; diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/fs/bio.c linux/fs/bio.c --- /opt/kernel/linux-2.5.1-pre5/fs/bio.c Tue Dec 4 04:42:00 2001 +++ linux/fs/bio.c Thu Dec 6 07:54:14 2001 @@ -74,7 +74,7 @@ struct bio *bio; if ((bio = bio_pool)) { - BUG_ON(bio_pool_free <= 0); + BIO_BUG_ON(bio_pool_free <= 0); bio_pool = bio->bi_next; bio->bi_next = NULL; bio_pool_free--; @@ -90,7 +90,7 @@ spin_lock_irqsave(&bio_lock, flags); bio = __bio_pool_get(); - BUG_ON(!bio && bio_pool_free); + BIO_BUG_ON(!bio && bio_pool_free); spin_unlock_irqrestore(&bio_lock, flags); return bio; @@ -121,8 +121,7 @@ } } -#define BIO_CAN_WAIT(gfp_mask) \ - (((gfp_mask) & (__GFP_WAIT | __GFP_IO)) == (__GFP_WAIT | __GFP_IO)) +#define BIO_CAN_WAIT(gfp_mask) ((gfp_mask) & __GFP_WAIT) static inline struct bio_vec *bvec_alloc(int gfp_mask, int nr, int *idx) { @@ -198,13 +197,15 @@ { struct biovec_pool *bp = &bvec_list[bio->bi_max]; - BUG_ON(bio->bi_max >= BIOVEC_NR_POOLS); + BIO_BUG_ON(bio->bi_max >= BIOVEC_NR_POOLS); /* * cloned bio doesn't own the veclist */ - if (!(bio->bi_flags & (1 << BIO_CLONED))) + if (!(bio->bi_flags & (1 << BIO_CLONED))) { kmem_cache_free(bp->bp_cachep, bio->bi_io_vec); + wake_up_nr(&bp->bp_wait, 1); + } bio_pool_put(bio); } @@ -212,13 +213,13 @@ inline void bio_init(struct bio *bio) { bio->bi_next = NULL; - atomic_set(&bio->bi_cnt, 1); bio->bi_flags = 0; bio->bi_rw = 0; bio->bi_vcnt = 0; bio->bi_idx = 0; bio->bi_size = 0; bio->bi_end_io = NULL; + atomic_set(&bio->bi_cnt, 1); } static inline struct bio *__bio_alloc(int gfp_mask, bio_destructor_t *dest) @@ -301,6 +302,7 @@ */ static inline void bio_free(struct bio *bio) { + bio->bi_next = NULL; bio->bi_destructor(bio); } @@ -314,16 +316,13 @@ **/ void bio_put(struct bio *bio) { - BUG_ON(!atomic_read(&bio->bi_cnt)); + BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); /* * last put frees it */ - if 
(atomic_dec_and_test(&bio->bi_cnt)) { - BUG_ON(bio->bi_next); - + if (atomic_dec_and_test(&bio->bi_cnt)) bio_free(bio); - } } /** @@ -459,33 +458,10 @@ static int bio_end_io_kio(struct bio *bio, int nr_sectors) { struct kiobuf *kio = (struct kiobuf *) bio->bi_private; - int uptodate, done; - - done = 0; - uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - do { - int sectors = bio->bi_io_vec[bio->bi_idx].bv_len >> 9; - - nr_sectors -= sectors; - - bio->bi_idx++; - - done = !end_kio_request(kio, uptodate); - - if (bio->bi_idx == bio->bi_vcnt) - done = 1; - } while (!done && nr_sectors > 0); - - /* - * all done - */ - if (done) { - bio_put(bio); - return 0; - } - - return 1; + end_kio_request(kio, test_bit(BIO_UPTODATE, &bio->bi_flags)); + bio_put(bio); + return 0; } /* @@ -553,7 +529,7 @@ max_bytes = get_max_sectors(dev) << 9; max_segments = get_max_segments(dev); if ((max_bytes >> PAGE_SHIFT) < (max_segments + 1)) - max_segments = (max_bytes >> PAGE_SHIFT) + 1; + max_segments = (max_bytes >> PAGE_SHIFT); if (max_segments > BIO_MAX_PAGES) max_segments = BIO_MAX_PAGES; @@ -566,14 +542,12 @@ offset = kio->offset & ~PAGE_MASK; size = kio->length; - /* - * set I/O count to number of pages for now - */ - atomic_set(&kio->io_count, total_nr_pages); + atomic_set(&kio->io_count, 1); map_i = 0; next_chunk: + atomic_inc(&kio->io_count); if ((nr_pages = total_nr_pages) > max_segments) nr_pages = max_segments; @@ -638,6 +612,8 @@ out: if (err) kio->errno = err; + + end_kio_request(kio, !err); } int bio_endio(struct bio *bio, int uptodate, int nr_sectors) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/fs/partitions/ldm.c linux/fs/partitions/ldm.c --- /opt/kernel/linux-2.5.1-pre5/fs/partitions/ldm.c Mon Nov 12 12:43:11 2001 +++ linux/fs/partitions/ldm.c Mon Dec 3 11:03:19 2001 @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/include/linux/bio.h linux/include/linux/bio.h --- 
/opt/kernel/linux-2.5.1-pre5/include/linux/bio.h Tue Dec 4 04:42:00 2001 +++ linux/include/linux/bio.h Thu Dec 6 07:50:01 2001 @@ -38,20 +38,26 @@ }; /* + * weee, c forward decl... + */ +struct bio; +typedef int (bio_end_io_t) (struct bio *, int); +typedef void (bio_destructor_t) (struct bio *); + +/* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) */ struct bio { sector_t bi_sector; struct bio *bi_next; /* request queue link */ - atomic_t bi_cnt; /* pin count */ kdev_t bi_dev; /* will be block device */ unsigned long bi_flags; /* status, command, etc */ unsigned long bi_rw; /* bottom bits READ/WRITE, * top bits priority */ - unsigned int bi_vcnt; /* how may bio_vec's */ + unsigned int bi_vcnt; /* how many bio_vec's */ unsigned int bi_idx; /* current index into bvl_vec */ unsigned int bi_size; /* total size in bytes */ unsigned int bi_max; /* max bvl_vecs we can hold, @@ -59,10 +65,12 @@ struct bio_vec *bi_io_vec; /* the actual vec list */ - int (*bi_end_io)(struct bio *bio, int nr_sectors); + bio_end_io_t *bi_end_io; + atomic_t bi_cnt; /* pin count */ + void *bi_private; - void (*bi_destructor)(struct bio *); /* destructor */ + bio_destructor_t *bi_destructor; /* destructor */ }; /* @@ -83,13 +91,13 @@ */ #define BIO_RW 0 #define BIO_RW_AHEAD 1 -#define BIO_BARRIER 2 +#define BIO_RW_BARRIER 2 /* * various member access, note that bio_data should of course not be used * on highmem page vectors */ -#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(bio)->bi_idx])) +#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) #define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx) #define bio_page(bio) bio_iovec((bio))->bv_page #define __bio_offset(bio, idx) bio_iovec_idx((bio), (idx))->bv_offset @@ -125,9 +133,6 @@ (((addr1) | (mask)) == (((addr2) - 1) | (mask))) #define BIO_SEG_BOUNDARY(q, b1, b2) \ __BIO_SEG_BOUNDARY(bvec_to_phys(__BVEC_END((b1))), bio_to_phys((b2)) + (b2)->bi_size, (q)->seg_boundary_mask) - 
-typedef int (bio_end_io_t) (struct bio *, int); -typedef void (bio_destructor_t) (struct bio *); #define bio_io_error(bio) bio_endio((bio), 0, bio_sectors((bio))) diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/include/linux/blk.h linux/include/linux/blk.h --- /opt/kernel/linux-2.5.1-pre5/include/linux/blk.h Tue Dec 4 04:42:00 2001 +++ linux/include/linux/blk.h Thu Dec 6 07:50:47 2001 @@ -89,6 +89,44 @@ int end_that_request_first(struct request *, int uptodate, int nr_sectors); void end_that_request_last(struct request *); +#define __elv_next_request(q) (q)->elevator.elevator_next_req_fn((q)) + +extern inline struct request *elv_next_request(request_queue_t *q) +{ + struct request *rq; + + while ((rq = __elv_next_request(q))) { + rq->flags |= REQ_STARTED; + + if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) + break; + + /* + * all ok, break and return it + */ + if (!q->prep_rq_fn(q, rq)) + break; + + /* + * prep said no-go, kill it + */ + blkdev_dequeue_request(rq); + if (end_that_request_first(rq, 0, rq->nr_sectors)) + BUG(); + + end_that_request_last(rq); + } + + return rq; +} + +extern void blk_plug_device(request_queue_t *); +extern inline void elv_add_request(request_queue_t *q, struct request *rq) +{ + blk_plug_device(q); + q->elevator.elevator_add_req_fn(q, rq, q->queue_head.prev); +} + #if defined(MAJOR_NR) || defined(IDE_DRIVER) #undef DEVICE_ON diff -urN -X exclude /opt/kernel/linux-2.5.1-pre5/include/linux/blkdev.h linux/include/linux/blkdev.h --- /opt/kernel/linux-2.5.1-pre5/include/linux/blkdev.h Tue Dec 4 04:42:00 2001 +++ linux/include/linux/blkdev.h Thu Dec 6 07:50:47 2001 @@ -15,21 +15,32 @@ struct elevator_s; typedef struct elevator_s elevator_t; +struct request_list { + unsigned int count; + struct list_head free; + wait_queue_head_t wait; +}; + struct request { struct list_head queuelist; /* looking for ->queue? you must _not_ * access it directly, use * blkdev_dequeue_request! 
*/ int elevator_sequence; - int inactive; /* driver hasn't seen it yet */ + unsigned char cdb[16]; + + unsigned long flags; /* see REQ_ bits below */ int rq_status; /* should split this into a few status bits */ kdev_t rq_dev; - int cmd; /* READ or WRITE */ int errors; sector_t sector; unsigned long nr_sectors; - unsigned long hard_sector, hard_nr_sectors; + unsigned long hard_sector; /* the hard_* are block layer + * internals, no driver should + * touch them + */ + unsigned long hard_nr_sectors; unsigned short nr_segments; unsigned short nr_hw_segments; unsigned int current_nr_sectors; @@ -39,8 +50,49 @@ struct completion *waiting; struct bio *bio, *biotail; request_queue_t *q; + struct request_list *rl; +}; + +/* + * first three bits match BIO_RW* bits, important + */ +enum rq_flag_bits { + __REQ_RW, /* not set, read. set, write */ + __REQ_RW_AHEAD, /* READA */ + __REQ_BARRIER, /* may not be passed */ + __REQ_CMD, /* is a regular fs rw request */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_STARTED, /* drive already may have started this one */ + __REQ_DONTPREP, /* don't call prep for this one */ + /* + * for IDE + */ + __REQ_DRIVE_CMD, + __REQ_DRIVE_TASK, + + __REQ_PC, /* packet command (special) */ + __REQ_BLOCK_PC, /* queued down pc from block layer */ + __REQ_SENSE, /* sense retrieval */ + + __REQ_SPECIAL, /* driver special command */ + + __REQ_NR_BITS, /* stops here */ }; +#define REQ_RW (1 << __REQ_RW) +#define REQ_RW_AHEAD (1 << __REQ_RW_AHEAD) +#define REQ_BARRIER (1 << __REQ_BARRIER) +#define REQ_CMD (1 << __REQ_CMD) +#define REQ_NOMERGE (1 << __REQ_NOMERGE) +#define REQ_STARTED (1 << __REQ_STARTED) +#define REQ_DONTPREP (1 << __REQ_DONTPREP) +#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD) +#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK) +#define REQ_PC (1 << __REQ_PC) +#define REQ_SENSE (1 << __REQ_SENSE) +#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC) +#define REQ_SPECIAL (1 << __REQ_SPECIAL) + #include typedef int (merge_request_fn)
(request_queue_t *, struct request *, @@ -50,6 +102,7 @@ typedef void (request_fn_proc) (request_queue_t *q); typedef request_queue_t * (queue_proc) (kdev_t dev); typedef int (make_request_fn) (request_queue_t *q, struct bio *bio); +typedef int (prep_rq_fn) (request_queue_t *, struct request *); typedef void (unplug_device_fn) (void *q); enum blk_queue_state { @@ -63,12 +116,6 @@ */ #define QUEUE_NR_REQUESTS 8192 -struct request_list { - unsigned int count; - struct list_head free; - wait_queue_head_t wait; -}; - struct request_queue { /* @@ -82,17 +129,18 @@ struct list_head queue_head; elevator_t elevator; - request_fn_proc * request_fn; - merge_request_fn * back_merge_fn; - merge_request_fn * front_merge_fn; - merge_requests_fn * merge_requests_fn; - make_request_fn * make_request_fn; + request_fn_proc *request_fn; + merge_request_fn *back_merge_fn; + merge_request_fn *front_merge_fn; + merge_requests_fn *merge_requests_fn; + make_request_fn *make_request_fn; + prep_rq_fn *prep_rq_fn; /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. 
*/ - void * queuedata; + void *queuedata; /* * queue needs bounce pages for pages above this limit @@ -138,13 +186,12 @@ #define QUEUE_FLAG_CLUSTER 2 /* cluster several segments into 1 */ #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) - #define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) - #define blk_queue_empty(q) elv_queue_empty(q) - #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) +#define rq_data_dir(rq) ((rq)->flags & 1) + /* * noop, requests are automagically marked as active/inactive by I/O * scheduler -- see elv_next_request @@ -153,20 +200,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; -#define __elv_next_request(q) (q)->elevator.elevator_next_req_fn((q)) - -extern inline struct request *elv_next_request(request_queue_t *q) -{ - struct request *rq = __elv_next_request(q); - - if (rq) { - rq->inactive = 0; - wmb(); - } - - return rq; -} - #define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT) #define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT) @@ -186,7 +219,8 @@ #endif /* CONFIG_HIGHMEM */ #define rq_for_each_bio(bio, rq) \ - for (bio = (rq)->bio; bio; bio = bio->bi_next) + if ((rq->bio)) \ + for (bio = (rq)->bio; bio; bio = bio->bi_next) struct blk_dev_struct { /* @@ -219,6 +253,10 @@ extern inline request_queue_t *blk_get_queue(kdev_t dev); extern void blkdev_release_request(struct request *); extern void blk_attempt_remerge(request_queue_t *, struct request *); +extern struct request *blk_get_request(request_queue_t *, int, int); +extern void blk_put_request(struct request *); + +extern int block_ioctl(kdev_t, unsigned int, unsigned long); /* * Access functions for manipulating queue properties @@ -233,6 +271,7 @@ extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short); extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long); extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); +extern void 
blk_dump_rq_flags(struct request *, char *); extern void generic_unplug_device(void *); extern int * blk_size[MAX_BLKDEV]; @@ -256,8 +295,7 @@ #define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next) #define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev) -extern void drive_stat_acct (kdev_t dev, int rw, - unsigned long nr_sectors, int new_io); +extern void drive_stat_acct(struct request *, int, int); extern inline void blk_clear(int major) {