diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/Documentation/Configure.help linux/Documentation/Configure.help --- linux-2.3.43-6-clean/Documentation/Configure.help Thu Feb 10 04:37:53 2000 +++ linux/Documentation/Configure.help Wed Feb 9 04:42:07 2000 @@ -438,6 +438,23 @@ say M here and read Documentation/modules.txt. The module will be called ide-cd.o. + +Packet writing on CD/DVD media +CONFIG_CDROM_PACKET + If you have a CDROM drive that supports packet writing, say Y to + include preliminary support. It should work with any MMC/Mt Fuji + compliant ATAPI or SCSI drive, which is just about any newer CD + writer. + + If you say Y here, you will be able to transparently write to CD-RW + and CD-R media. Writing is best used with the UDF file system, + so including that is recommended. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read Documentation/modules.txt. The module will be + called packet.o. + Include IDE/ATAPI TAPE support CONFIG_BLK_DEV_IDETAPE If you have an IDE tape drive using the ATAPI protocol, say Y. 
diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/Config.in linux/drivers/block/Config.in --- linux-2.3.43-6-clean/drivers/block/Config.in Tue Feb 8 22:59:16 2000 +++ linux/drivers/block/Config.in Tue Feb 8 22:16:24 2000 @@ -28,6 +28,9 @@ bool ' Use multi-mode by default' CONFIG_IDEDISK_MULTI_MODE fi dep_tristate ' Include IDE/ATAPI CDROM support' CONFIG_BLK_DEV_IDECD $CONFIG_BLK_DEV_IDE + if [ "$CONFIG_BLK_DEV_IDECD" != "n" ]; then + tristate ' Packet writing on CD/DVD media' CONFIG_CDROM_PACKET + fi dep_tristate ' Include IDE/ATAPI TAPE support' CONFIG_BLK_DEV_IDETAPE $CONFIG_BLK_DEV_IDE dep_tristate ' Include IDE/ATAPI FLOPPY support' CONFIG_BLK_DEV_IDEFLOPPY $CONFIG_BLK_DEV_IDE dep_tristate ' SCSI emulation support' CONFIG_BLK_DEV_IDESCSI $CONFIG_BLK_DEV_IDE diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/Makefile linux/drivers/block/Makefile --- linux-2.3.43-6-clean/drivers/block/Makefile Tue Feb 8 22:59:16 2000 +++ linux/drivers/block/Makefile Tue Feb 8 22:16:24 2000 @@ -26,6 +26,14 @@ LX_OBJS := ll_rw_blk.o blkpg.o MX_OBJS := +ifeq ($(CONFIG_CDROM_PACKET),y) +LX_OBJS += packet.o +else + ifeq ($(CONFIG_CDROM_PACKET),m) + MX_OBJS += packet.o + endif +endif + ifeq ($(CONFIG_MAC_FLOPPY),y) L_OBJS += swim3.o endif diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/ide-cd.c linux/drivers/block/ide-cd.c --- linux-2.3.43-6-clean/drivers/block/ide-cd.c Tue Feb 1 15:44:47 2000 +++ linux/drivers/block/ide-cd.c Thu Feb 10 04:35:33 2000 @@ -277,6 +277,7 @@ #define IDECD_VERSION "4.56" #include +#include #include #include #include @@ -294,6 +295,7 @@ #include #include "ide-cd.h" +#include "packet.h" /**************************************************************************** * Generic packet command support and error handling routines. 
@@ -313,29 +315,29 @@ static -void cdrom_analyze_sense_data (ide_drive_t *drive, struct request_sense *reqbuf, - struct packet_command *failed_command) +void cdrom_analyze_sense_data (ide_drive_t *drive, struct packet_command *pc, + struct request_sense *sense) { - if (reqbuf->sense_key == NOT_READY || - reqbuf->sense_key == UNIT_ATTENTION) { + if (sense->sense_key == NOT_READY || + sense->sense_key == UNIT_ATTENTION) { /* Make good and sure we've seen this potential media change. Some drives (i.e. Creative) fail to present the correct sense key in the error register. */ - cdrom_saw_media_change (drive); - + cdrom_saw_media_change(drive); + if (pc) + return; /* Don't print not ready or unit attention errors for READ_SUBCHANNEL. Workman (and probably other programs) uses this command to poll the drive, and we don't want to fill the syslog with useless errors. */ - if (failed_command && - failed_command->c[0] == GPCMD_READ_SUBCHANNEL) + if (pc->c[0] == GPCMD_READ_SUBCHANNEL || pc->c[0] == 0) return; } - if (reqbuf->error_code == 0x70 && reqbuf->sense_key == 0x02 - && ((reqbuf->asc == 0x3a && reqbuf->ascq == 0x00) || - (reqbuf->asc == 0x04 && reqbuf->ascq == 0x01))) + if (sense->error_code == 0x70 && sense->sense_key == 0x02 && + ((sense->asc == 0x3a && sense->ascq == 0x00) || + (sense->asc == 0x04 && sense->ascq == 0x01))) { /* * Suppress the following errors: @@ -352,30 +354,32 @@ char buf[80]; printk ("ATAPI device %s:\n", drive->name); - if (reqbuf->error_code==0x70) + if (sense->error_code==0x70) printk(" Error: "); - else if (reqbuf->error_code==0x71) + else if (sense->error_code==0x71) printk(" Deferred Error: "); + else if (sense->error_code == 0x7f) + printk(" Vendor-specific Error: "); else printk(" Unknown Error Type: "); - if ( reqbuf->sense_key < ARY_LEN (sense_key_texts)) - s = sense_key_texts[reqbuf->sense_key]; + if (sense->sense_key < ARY_LEN(sense_key_texts)) + s = sense_key_texts[sense->sense_key]; else s = "bad sense key!"; - printk ("%s -- (Sense 
key=0x%02x)\n", s, reqbuf->sense_key); + printk("%s -- (Sense key=0x%02x)\n", s, sense->sense_key); - if (reqbuf->asc == 0x40) { - sprintf (buf, "Diagnostic failure on component 0x%02x", - reqbuf->ascq); + if (sense->asc == 0x40) { + sprintf(buf, "Diagnostic failure on component 0x%02x", + sense->ascq); s = buf; } else { - int lo=0, mid, hi=ARY_LEN (sense_data_texts); - unsigned long key = (reqbuf->sense_key << 16); - key |= (reqbuf->asc << 8); - if ( ! (reqbuf->ascq >= 0x80 && reqbuf->ascq <= 0xdd) ) - key |= reqbuf->ascq; + int lo = 0, mid, hi = ARY_LEN(sense_data_texts); + unsigned long key = (sense->sense_key << 16); + key |= (sense->asc << 8); + if (!(sense->ascq >=0x80 && sense->ascq <=0xdd)) + key |= sense->ascq; s = NULL; while (hi > lo) { @@ -393,35 +397,35 @@ } if (s == NULL) { - if (reqbuf->asc > 0x80) + if (sense->asc > 0x80) s = "(vendor-specific error)"; else s = "(reserved error code)"; } printk (" %s -- (asc=0x%02x, ascq=0x%02x)\n", - s, reqbuf->asc, reqbuf->ascq); + s, sense->asc, sense->ascq); - if (failed_command != NULL) { + if (pc != NULL) { - int lo=0, mid, hi= ARY_LEN (packet_command_texts); + int lo = 0, mid, hi = ARY_LEN(packet_command_texts); s = NULL; while (hi > lo) { mid = (lo + hi) / 2; - if (packet_command_texts[mid].packet_command == failed_command->c[0]) { + if (packet_command_texts[mid].packet_command == pc->c[0]) { s = packet_command_texts[mid].text; break; } - else if (packet_command_texts[mid].packet_command > failed_command->c[0]) + else if (packet_command_texts[mid].packet_command > pc->c[0]) hi = mid; else lo = mid+1; } printk (" The failed \"%s\" packet command was: \n \"", s); - for (i=0; ic); i++) - printk ("%02x ", failed_command->c[i]); + for (i=0; i < sizeof(pc->c); i++) + printk ("%02x ", pc->c[i]); printk ("\"\n"); } @@ -430,21 +434,22 @@ * In the case of NOT_READY, if SKSV is set the drive can * give us nice ETA readings. 
*/ - if (reqbuf->sense_key == NOT_READY && (reqbuf->sks[0] & 0x80)) { - int progress = (reqbuf->sks[1] << 8 | reqbuf->sks[2]) * 100; + if (sense->sense_key == NOT_READY && + (sense->sks[0] & 0x80)) { + int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100; printk(" Command is %02d%% complete\n", progress / 0xffff); } - if (reqbuf->sense_key == ILLEGAL_REQUEST && - (reqbuf->sks[0] & 0x80) != 0) { - printk (" Error in %s byte %d", - (reqbuf->sks[0] & 0x40) != 0 ? + if (sense->sense_key == ILLEGAL_REQUEST && + (sense->sks[0] & 0x80) != 0) { + printk(" Error in %s byte %d", + (sense->sks[0] & 0x40) != 0 ? "command packet" : "command data", - (reqbuf->sks[1] << 8) + reqbuf->sks[2]); + (sense->sks[1] << 8) + sense->sks[2]); - if ((reqbuf->sks[0] & 0x40) != 0) - printk (" bit %d", reqbuf->sks[0] & 0x07); + if ((sense->sks[0] & 0x40) != 0) + printk(" bit %d", sense->sks[0] & 0x07); printk ("\n"); } @@ -455,63 +460,79 @@ /* Suppress printing unit attention and `in progress of becoming ready' errors when we're not being verbose. 
*/ - if (reqbuf->sense_key == UNIT_ATTENTION || - (reqbuf->sense_key == NOT_READY && (reqbuf->asc == 4 || - reqbuf->asc == 0x3a))) + if (sense->sense_key == UNIT_ATTENTION || + (sense->sense_key == NOT_READY && (sense->asc == 4 || + sense->asc == 0x3a))) return; printk ("%s: error code: 0x%02x sense_key: 0x%02x asc: 0x%02x ascq: 0x%02x\n", - drive->name, - reqbuf->error_code, reqbuf->sense_key, - reqbuf->asc, reqbuf->ascq); + drive->name, sense->error_code, sense->sense_key, + sense->asc, sense->ascq); #endif /* not VERBOSE_IDE_CD_ERRORS */ } -static void cdrom_queue_request_sense (ide_drive_t *drive, - struct semaphore *sem, - struct packet_command *failed_command) +static void cdrom_queue_request_sense(ide_drive_t *drive, + struct semaphore *sem, + struct request_sense *sense, + struct packet_command *failed_cmd) { struct cdrom_info *info = drive->driver_data; struct request *rq; - struct packet_command *pc; + struct packet_command *pc = &info->request_sense_pc; - /* Make up a new request to retrieve sense information. 
*/ - pc = &info->request_sense_pc; - memset(pc, 0, sizeof (*pc)); + if (sense == NULL) + sense = &info->sense_data; + memset(pc, 0, sizeof(struct packet_command)); pc->c[0] = GPCMD_REQUEST_SENSE; - - /* just get the first 18 bytes of the sense info, there might not - * be more available */ pc->c[4] = pc->buflen = 18; - pc->buffer = (char *)&info->sense_data; - pc->sense_data = (struct request_sense *)failed_command; + pc->buffer = (char *) sense; + pc->sense = (struct request_sense *) failed_cmd; /* stuff the sense request in front of our current request */ rq = &info->request_sense_request; - ide_init_drive_cmd (rq); + ide_init_drive_cmd(rq); rq->cmd = REQUEST_SENSE_COMMAND; - rq->buffer = (char *)pc; + rq->buffer = (char *) pc; rq->sem = sem; - (void) ide_do_drive_cmd (drive, rq, ide_preempt); + (void) ide_do_drive_cmd(drive, rq, ide_preempt); } - static void cdrom_end_request (int uptodate, ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate) { - struct packet_command *pc = (struct packet_command *) - rq->buffer; - cdrom_analyze_sense_data (drive, - (struct request_sense *) (pc->buffer - pc->c[4]), - (struct packet_command *) pc->sense_data); + struct packet_command *pc = (struct packet_command *)rq->buffer; + cdrom_analyze_sense_data(drive, + (struct packet_command *) pc->sense, + (struct request_sense *) (pc->buffer - pc->c[4])); + } + if (rq->cmd == READ || rq->cmd == WRITE_PACKET) + if (!rq->current_nr_sectors) + uptodate = 1; + + if (rq->cmd == WRITE_PACKET && !uptodate) { + struct request *rq = HWGROUP(drive)->rq; + unsigned long flags; + int i; + +// printk("ending %s rq %lu\n", uptodate ? 
"good" : "dirty", rq->nr_sectors); + + spin_lock_irqsave(&io_request_lock, flags); + i = rq->nr_sectors; + for (i = rq->nr_sectors; i > 0; i -= rq->current_nr_sectors) + end_that_request_first(rq, 1, drive->name); + + HWGROUP(drive)->drive->queue.current_request = rq->next; + blk_dev[MAJOR(rq->rq_dev)].request_queue.current_request = NULL; + HWGROUP(drive)->rq = NULL; + end_that_request_last(rq); + spin_unlock_irqrestore(&io_request_lock, flags); + return; } - if (rq->cmd == READ && !rq->current_nr_sectors) - uptodate = 1; - ide_end_request (uptodate, HWGROUP(drive)); + ide_end_request(uptodate, HWGROUP(drive)); } @@ -522,7 +543,7 @@ { struct request *rq = HWGROUP(drive)->rq; int stat, cmd, err, sense_key; - struct packet_command *pc = (struct packet_command *) rq->buffer; + struct packet_command *pc; /* Check for errors. */ stat = GET_STAT(); @@ -545,7 +566,7 @@ /* We got an error trying to get sense info from the drive (probably while trying to recover from a former error). Just give up. */ - + pc = (struct packet_command *) rq->buffer; pc->stat = 1; cdrom_end_request (1, drive); *startstop = ide_error (drive, "request sense failure", stat); @@ -555,6 +576,7 @@ /* All other functions, except for READ. */ struct semaphore *sem = NULL; + pc = (struct packet_command *) rq->buffer; /* Check for tray open. */ if (sense_key == NOT_READY) { @@ -588,9 +610,10 @@ cdrom_end_request (1, drive); if ((stat & ERR_STAT) != 0) - cdrom_queue_request_sense(drive, sem, pc); + cdrom_queue_request_sense(drive, sem, pc->sense, + pc); } else { - /* Handle errors from READ requests. */ + /* Handle errors from READ and WRITE requests. */ if (sense_key == NOT_READY) { /* Tray open. */ @@ -627,7 +650,7 @@ /* If we got a CHECK_CONDITION status, queue a request sense command. 
*/ if ((stat & ERR_STAT) != 0) - cdrom_queue_request_sense(drive, NULL, NULL); + cdrom_queue_request_sense(drive, NULL, NULL, NULL); } } @@ -642,11 +665,22 @@ struct packet_command *pc = (struct packet_command *) rq->buffer; unsigned long wait = 0; - /* blank and format can take an extremly long time to - * complete, if the IMMED bit was not set. + /* + * Some commands are *slow* and normally take a long time to + * complete. Usually we can use the ATAPI "disconnect" to bypass + * this, but not all commands/drives support that. Let + * ide_timer_expiry keep polling us for these. */ - if (pc->c[0] == GPCMD_BLANK || pc->c[0] == GPCMD_FORMAT_UNIT) - wait = 60*60*HZ; + switch (pc->c[0]) { + case GPCMD_BLANK: + case GPCMD_FORMAT_UNIT: + case GPCMD_RESERVE_RZONE_TRACK: + wait = WAIT_CMD; + break; + default: + wait = 0; + break; + } return wait; } @@ -658,8 +692,10 @@ called when the interrupt from the drive arrives. Otherwise, HANDLER will be called immediately after the drive is prepared for the transfer. */ -static ide_startstop_t cdrom_start_packet_command (ide_drive_t *drive, int xferlen, - ide_handler_t *handler) +static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, + int xferlen, + ide_handler_t *handler, + int cmd) { ide_startstop_t startstop; struct cdrom_info *info = drive->driver_data; @@ -668,8 +704,15 @@ if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) return startstop; - if (info->dma) - info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive); + if (info->dma) { + if (cmd == READ) { + info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive); + } else if (cmd == WRITE) { + info->dma = !HWIF(drive)->dmaproc(ide_dma_write, drive); + } else { + printk("ide-cd: DMA set, but not allowed\n"); + } + } /* Set up the controller registers. */ OUT_BYTE (info->dma, IDE_FEATURE_REG); @@ -701,7 +744,8 @@ or there's data ready. 
*/ static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive, unsigned char *cmd_buf, int cmd_len, - ide_handler_t *handler) + ide_handler_t *handler, + unsigned timeout) { if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) { /* Here we should have been called after receiving an interrupt @@ -710,17 +754,17 @@ ide_startstop_t startstop; /* Check for errors. */ - if (cdrom_decode_status (&startstop, drive, DRQ_STAT, &stat_dum)) + if (cdrom_decode_status(&startstop, drive, DRQ_STAT, &stat_dum)) return startstop; } else { ide_startstop_t startstop; /* Otherwise, we must wait for DRQ to get set. */ - if (ide_wait_stat (&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) + if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) return startstop; } /* Arm the interrupt handler. */ - ide_set_handler (drive, handler, WAIT_CMD, cdrom_timer_expiry); + ide_set_handler(drive, handler, timeout, cdrom_timer_expiry); /* Send the command to the device. */ atapi_output_bytes (drive, cmd_buf, cmd_len); @@ -1057,15 +1101,15 @@ (65534 / CD_FRAMESIZE) : 65535); /* Set up the command */ - memset (&pc.c, 0, sizeof (pc.c)); + memset(&pc.c, 0, sizeof(pc.c)); pc.c[0] = GPCMD_READ_10; pc.c[7] = (nframes >> 8); pc.c[8] = (nframes & 0xff); - put_unaligned(htonl (frame), (unsigned int *) &pc.c[2]); + put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]); /* Send the command to the drive and return. 
*/ - return cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c), - &cdrom_read_intr); + return cdrom_transfer_packet_command(drive, pc.c, sizeof (pc.c), + &cdrom_read_intr, WAIT_CMD); } @@ -1105,10 +1149,11 @@ sector -= nskip; frame = sector / SECTORS_PER_FRAME; - memset (&pc.c, 0, sizeof (pc.c)); + memset(&pc.c, 0, sizeof(pc.c)); pc.c[0] = GPCMD_SEEK; put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]); - return cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c), &cdrom_seek_intr); + return cdrom_transfer_packet_command(drive, pc.c, sizeof (pc.c), + &cdrom_seek_intr, WAIT_CMD); } static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block) @@ -1117,7 +1162,7 @@ info->dma = 0; info->start_seek = jiffies; - return cdrom_start_packet_command (drive, 0, cdrom_start_seek_continuation); + return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation, 0); } /* Fix up a possibly partially-processed request so that we can @@ -1169,7 +1214,7 @@ info->dma = 0; /* Start sending the read request to the drive. */ - return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation); + return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation, READ); } /**************************************************************************** @@ -1177,19 +1222,17 @@ */ /* Forward declarations. */ -static int cdrom_lockdoor(ide_drive_t *drive, int lockflag); +static int cdrom_lockdoor(ide_drive_t *drive, int lockflag, + struct request_sense *sense); /* Interrupt routine for packet command completion. */ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive) { int ireason, len, stat, thislen; struct request *rq = HWGROUP(drive)->rq; - struct packet_command *pc = (struct packet_command *)rq->buffer; - struct cdrom_info *info = drive->driver_data; + struct packet_command *pc = (struct packet_command *) rq->buffer; ide_startstop_t startstop; - pc->sense_data = &info->sense_data; - /* Check for errors. 
*/ if (cdrom_decode_status (&startstop, drive, 0, &stat)) return startstop; @@ -1286,8 +1329,8 @@ struct packet_command *pc = (struct packet_command *)rq->buffer; /* Send the command to the drive and return. */ - return cdrom_transfer_packet_command (drive, pc->c, - sizeof (pc->c), &cdrom_pc_intr); + return cdrom_transfer_packet_command(drive, pc->c, sizeof(pc->c), + &cdrom_pc_intr, WAIT_CMD); } @@ -1303,7 +1346,7 @@ len = pc->buflen; /* Start sending the command to the drive. */ - return cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation); + return cdrom_start_packet_command(drive, len, cdrom_do_pc_continuation, 0); } @@ -1319,8 +1362,12 @@ static int cdrom_queue_packet_command(ide_drive_t *drive, struct packet_command *pc) { - int retries = 10; + struct request_sense sense; struct request req; + int retries = 10; + + if (pc->sense == NULL) + pc->sense = &sense; /* Start of retry loop. */ do { @@ -1336,7 +1383,7 @@ /* The request failed. Retry if it was due to a unit attention status (usually means media was changed). */ - struct request_sense *reqbuf = pc->sense_data; + struct request_sense *reqbuf = pc->sense; if (reqbuf->sense_key == UNIT_ATTENTION) cdrom_saw_media_change (drive); @@ -1357,31 +1404,174 @@ } while (pc->stat != 0 && retries >= 0); /* Return an error if the command failed. */ - if (pc->stat) - return -EIO; + return pc->stat ? -EIO : 0; +} + +/* + * Write handling + */ +static int cdrom_write_check_ireason(ide_drive_t *drive, int len, + int ireason) +{ + /* Two notes about IDE interrupt reason here - 0 means that + * the drive wants to receive data from us, 2 means that + * the drive is expecting data from us. + */ + ireason &= 3; + + if (ireason == 2) { + /* Whoops... The drive wants to send data. */ + printk("%s: cdrom_write_intr: wrong transfer direction!\n", + drive->name); + + /* Throw some data at the drive so it doesn't hang + and quit this request. 
*/ + while (len > 0) { + int dum = 0; + atapi_output_bytes(drive, &dum, sizeof(dum)); + len -= sizeof(dum); + } + } else { + /* Drive wants a command packet, or invalid ireason... */ + printk("%s: cdrom_write_intr: bad interrupt reason %d\n", + drive->name, ireason); + } + + cdrom_end_request(0, drive); + return -1; +} + +static ide_startstop_t cdrom_write_intr(ide_drive_t *drive) +{ + int stat, ireason, len, sectors_to_transfer; + struct cdrom_info *info = drive->driver_data; + int i, dma_error = 0, dma = info->dma; + ide_startstop_t startstop; + + struct request *rq = HWGROUP(drive)->rq; + + /* Check for errors. */ + if (dma) { + info->dma = 0; + if ((dma_error = HWIF(drive)->dmaproc(ide_dma_end, drive))) { + printk("ide-cd: write dma error\n"); + HWIF(drive)->dmaproc(ide_dma_off, drive); + } + } - /* The command succeeded. If it was anything other than - a request sense, eject, or door lock command, - and we think that the door is presently unlocked, lock it - again. (The door was probably unlocked via an explicit - CDROMEJECT ioctl.) */ - if (CDROM_STATE_FLAGS (drive)->door_locked == 0 && - (pc->c[0] != GPCMD_TEST_UNIT_READY && - pc->c[0] != GPCMD_REQUEST_SENSE && - pc->c[0] != GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL && - pc->c[0] != GPCMD_START_STOP_UNIT && - pc->c[0] != GPCMD_MODE_SENSE_10 && - pc->c[0] != GPCMD_MODE_SELECT_10)) { - (void) cdrom_lockdoor (drive, 1); + if (cdrom_decode_status(&startstop, drive, 0, &stat)) { + printk("ide-cd: write_intr decode_status bad\n"); + return startstop; } - return 0; + + if (dma) { + if (dma_error) + return ide_error(drive, "dma error", stat); + + rq = HWGROUP(drive)->rq; + for (i = rq->nr_sectors; i > 0;) { + i -= rq->current_nr_sectors; + ide_end_request(1, HWGROUP(drive)); + } + return ide_stopped; + } + + /* Read the interrupt reason and the transfer length. */ + ireason = IN_BYTE(IDE_NSECTOR_REG); + len = IN_BYTE(IDE_LCYL_REG) + 256 * IN_BYTE(IDE_HCYL_REG); + + /* If DRQ is clear, the command has completed. 
*/ + if ((stat & DRQ_STAT) == 0) { + /* If we're not done writing, complain. + * Otherwise, complete the command normally. + */ + if (rq->current_nr_sectors > 0) { + printk("%s: write_intr: data underrun (%ld blocks)\n", + drive->name, rq->current_nr_sectors); + cdrom_end_request(0, drive); + } else + cdrom_end_request(1, drive); + return ide_stopped; + } + + /* Check that the drive is expecting to do the same thing we are. */ + if (ireason & 3) + if (cdrom_write_check_ireason(drive, len, ireason)) + return ide_stopped; + + /* The number of sectors we need to read from the drive. */ + sectors_to_transfer = len / SECTOR_SIZE; + + /* Now loop while we still have data to read from the drive. DMA + * transfers will already have been complete + */ + while (sectors_to_transfer > 0) { + unsigned long flags; + + /* If we've filled the present buffer but there's another + chained buffer after it, move on. */ + if (rq->current_nr_sectors == 0 && rq->nr_sectors > 0) + cdrom_end_request(1, drive); + + atapi_output_bytes(drive, rq->buffer, rq->current_nr_sectors); + spin_lock_irqsave(&io_request_lock, flags); + rq->nr_sectors -= rq->current_nr_sectors; + rq->current_nr_sectors = 0; + rq->sector += rq->current_nr_sectors; + sectors_to_transfer -= rq->current_nr_sectors; + spin_unlock_irqrestore(&io_request_lock, flags); + } + + /* arm handler */ + ide_set_handler(drive, &cdrom_write_intr, 5 * WAIT_CMD, NULL); + return ide_started; +} + +static ide_startstop_t cdrom_start_write_cont(ide_drive_t *drive) +{ + struct packet_command pc; + struct request *rq = HWGROUP(drive)->rq; + int nframes, frame; + + nframes = rq->nr_sectors >> 2; + frame = rq->sector >> 2; + + memset(&pc.c, 0, sizeof(pc.c)); + /* + * we might as well use WRITE_12, but none of the device I have + * support the streaming feature anyway, so who cares. 
+ */ + pc.c[0] = GPCMD_WRITE_10; +#if 0 + pc.c[1] = 1 << 3; +#endif + pc.c[7] = (nframes >> 8) & 0xff; + pc.c[8] = nframes & 0xff; + put_unaligned(cpu_to_be32(frame), (unsigned int *)&pc.c[2]); + + return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c), + cdrom_write_intr, 2 * WAIT_CMD); +} + +static ide_startstop_t cdrom_start_write(ide_drive_t *drive) +{ + struct cdrom_info *info = drive->driver_data; + + info->nsectors_buffered = 0; + + /* use dma, if possible. we don't need to check more, since we + * know that the transfer is always (at least!) 2KB aligned */ + info->dma = drive->using_dma ? 1 : 0; + + /* Start sending the read request to the drive. */ + return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont, WRITE); } /**************************************************************************** * cdrom driver request routine. */ static ide_startstop_t -ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, unsigned long block) +ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, unsigned long block) { ide_startstop_t action; struct cdrom_info *info = drive->driver_data; @@ -1418,7 +1608,29 @@ cdrom_end_request(1, drive); return ide_do_reset(drive); } - +#if defined(CONFIG_CDROM_PACKET) || defined(CONFIG_CDROM_PACKET_MODULE) + case WRITE: { + request_queue_t *q; + /* buy us a little time */ + if (CDROM_CONFIG_FLAGS(drive)->writing) { + ide_stall_queue(drive, IDECD_SEEK_TIMER); + return ide_stopped; + } + CDROM_CONFIG_FLAGS(drive)->writing = 1; + q = ide_get_queue(rq->rq_dev); + printk("ide-cd: got WRITE\n"); + pkt_handle_request(q); + /* not really stopped, but we don't want the next + * request to be tossed at us yet. 
+ */ + return ide_stopped; + } + case WRITE_PACKET: { + printk("ide-cd: got WRITE_PACKET %lu-%lu\n", rq->sector, rq->nr_sectors); + CDROM_CONFIG_FLAGS(drive)->writing = 0; + return cdrom_start_write(drive); +#endif + } default: { printk("ide-cd: bad cmd %d\n", rq -> cmd); cdrom_end_request(0, drive); @@ -1482,13 +1694,14 @@ return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; } -static int cdrom_check_status (ide_drive_t *drive) +static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) { struct packet_command pc; struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; memset(&pc, 0, sizeof(pc)); + pc.sense = sense; pc.c[0] = GPCMD_TEST_UNIT_READY; @@ -1505,24 +1718,26 @@ /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ static int -cdrom_lockdoor(ide_drive_t *drive, int lockflag) +cdrom_lockdoor(ide_drive_t *drive, int lockflag, struct request_sense *sense) { - struct request_sense *sense; + struct request_sense my_sense; struct packet_command pc; int stat; + if (sense == NULL) + sense = &my_sense; + /* If the drive cannot lock the door, just pretend. */ if (CDROM_CONFIG_FLAGS (drive)->no_doorlock) stat = 0; else { memset(&pc, 0, sizeof(pc)); + pc.sense = sense; pc.c[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; pc.c[4] = (lockflag != 0); stat = cdrom_queue_packet_command (drive, &pc); } - sense = pc.sense_data; - /* If we got an illegal field error, the drive probably cannot lock the door. */ if (stat != 0 && @@ -1547,7 +1762,8 @@ /* Eject the disk if EJECTFLAG is 0. If EJECTFLAG is 1, try to reload the disk. 
*/ -static int cdrom_eject(ide_drive_t *drive, int ejectflag) +static int cdrom_eject(ide_drive_t *drive, int ejectflag, + struct request_sense *sense) { struct packet_command pc; @@ -1559,13 +1775,15 @@ return 0; memset(&pc, 0, sizeof (pc)); + pc.sense = sense; pc.c[0] = GPCMD_START_STOP_UNIT; pc.c[4] = 0x02 + (ejectflag != 0); return cdrom_queue_packet_command (drive, &pc); } -static int cdrom_read_capacity(ide_drive_t *drive, unsigned *capacity) +static int cdrom_read_capacity(ide_drive_t *drive, unsigned *capacity, + struct request_sense *sense) { struct { __u32 lba; @@ -1576,6 +1794,7 @@ struct packet_command pc; memset(&pc, 0, sizeof (pc)); + pc.sense = sense; pc.c[0] = GPCMD_READ_CDVD_CAPACITY; pc.buffer = (char *)&capbuf; @@ -1589,11 +1808,13 @@ } static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, - int format, char *buf, int buflen) + int format, char *buf, int buflen, + struct request_sense *sense) { struct packet_command pc; memset(&pc, 0, sizeof(pc)); + pc.sense = sense; pc.buffer = buf; pc.buflen = buflen; @@ -1611,7 +1832,7 @@ /* Try to read the entire TOC for the disk into our internal buffer. */ -static int cdrom_read_toc (ide_drive_t *drive) +static int cdrom_read_toc (ide_drive_t *drive, struct request_sense *sense) { int stat, ntracks, i; struct cdrom_info *info = drive->driver_data; @@ -1636,13 +1857,13 @@ /* Check to see if the existing data is still valid. If it is, just return. */ if (CDROM_STATE_FLAGS (drive)->toc_valid) - (void) cdrom_check_status(drive); + (void) cdrom_check_status(drive, sense); if (CDROM_STATE_FLAGS (drive)->toc_valid) return 0; /* First read just the header, so we know how long the TOC is. */ - stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr, - sizeof (struct atapi_toc_header)); + stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *)&toc->hdr, + sizeof(struct atapi_toc_header), sense); if (stat) return stat; #if ! 
STANDARD_ATAPI @@ -1660,7 +1881,7 @@ stat = cdrom_read_tocentry (drive, toc->hdr.first_track, 1, 0, (char *)&toc->hdr, sizeof (struct atapi_toc_header) + (ntracks + 1) * - sizeof (struct atapi_toc_entry)); + sizeof (struct atapi_toc_entry), sense); if (stat && toc->hdr.first_track > 1) { /* Cds with CDI tracks only don't have any TOC entries, @@ -1675,9 +1896,9 @@ ntracks = 0; stat = cdrom_read_tocentry (drive, CDROM_LEADOUT, 1, 0, (char *)&toc->hdr, - sizeof (struct atapi_toc_header) + + sizeof(struct atapi_toc_header) + (ntracks+1) * - sizeof (struct atapi_toc_entry)); + sizeof(struct atapi_toc_entry), sense); if (stat) { return stat; } @@ -1721,8 +1942,8 @@ /* Read the multisession information. */ if (toc->hdr.first_track != CDROM_LEADOUT) { /* Read the multisession information. */ - stat = cdrom_read_tocentry (drive, 0, 1, 1, - (char *)&ms_tmp, sizeof (ms_tmp)); + stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, + sizeof (ms_tmp), sense); if (stat) return stat; } else { ms_tmp.ent.addr.msf.minute = 0; @@ -1748,7 +1969,7 @@ (long *)&toc->capacity); if (stat) #endif - stat = cdrom_read_capacity (drive, &toc->capacity); + stat = cdrom_read_capacity (drive, &toc->capacity, sense); if (stat) toc->capacity = 0x1fffff; /* for general /dev/cdrom like mounting, one big disc */ @@ -1764,29 +1985,32 @@ return 0; /* setup each minor to respond to a session */ +#if 0 minor++; i = toc->hdr.first_track; while ((i <= ntracks) && ((minor & CD_PART_MASK) < CD_PART_MAX)) { drive->part[minor & PARTN_MASK].start_sect = 0; drive->part[minor & PARTN_MASK].nr_sects = (toc->ent[i].addr.lba * - SECTORS_PER_FRAME) << (BLOCK_SIZE_BITS - 9); + SECTORS_PER_FRAME) << 2; HWIF(drive)->gd->sizes[minor] = (toc->ent[i].addr.lba * SECTORS_PER_FRAME) >> (BLOCK_SIZE_BITS - 9); i++; minor++; } +#endif return 0; } static int cdrom_read_subchannel(ide_drive_t *drive, int format, char *buf, - int buflen) + int buflen, struct request_sense *sense) { struct packet_command pc; memset(&pc, 0, 
sizeof(pc)); + pc.sense = sense; pc.buffer = buf; pc.buflen = buflen; @@ -1801,10 +2025,12 @@ /* ATAPI cdrom drives are free to select the speed you request or any slower rate :-( Requesting too fast a speed will _not_ produce an error. */ -static int cdrom_select_speed (ide_drive_t *drive, int speed) +static int cdrom_select_speed (ide_drive_t *drive, int speed, + struct request_sense *sense) { struct packet_command pc; memset(&pc, 0, sizeof(pc)); + pc.sense = sense; if (speed == 0) speed = 0xffff; /* set to max */ @@ -1829,7 +2055,8 @@ static int cdrom_get_toc_entry(ide_drive_t *drive, int track, - struct atapi_toc_entry **ent) + struct atapi_toc_entry **ent, + struct request_sense *sense) { struct cdrom_info *info = drive->driver_data; struct atapi_toc *toc = info->toc; @@ -1869,9 +2096,7 @@ pc.buflen = cgc->buflen; cgc->stat = cdrom_queue_packet_command(drive, &pc); - /* There was an error, assign sense. */ - if (cgc->stat) - cgc->sense = pc.sense_data; + cgc->sense = pc.sense; return cgc->stat; } @@ -1937,7 +2162,7 @@ struct atapi_toc *toc; /* Make sure our saved TOC is valid. */ - stat = cdrom_read_toc(drive); + stat = cdrom_read_toc(drive, NULL); if (stat) return stat; toc = info->toc; @@ -1952,7 +2177,8 @@ struct cdrom_tocentry *tocentry = (struct cdrom_tocentry*) arg; struct atapi_toc_entry *toce; - stat = cdrom_get_toc_entry (drive, tocentry->cdte_track, &toce); + stat = cdrom_get_toc_entry(drive, tocentry->cdte_track, &toce, + NULL); if (stat) return stat; tocentry->cdte_ctrl = toce->control; @@ -1977,11 +2203,21 @@ int ide_cdrom_reset (struct cdrom_device_info *cdi) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; + struct request_sense sense; struct request req; + int ret; ide_init_drive_cmd (&req); req.cmd = RESET_DRIVE_COMMAND; - return ide_do_drive_cmd (drive, &req, ide_wait); + ret = ide_do_drive_cmd (drive, &req, ide_wait); + + /* A reset will unlock the door. If it was previously locked, + * lock it again. 
+ */ + if (CDROM_STATE_FLAGS(drive)->door_locked) + (void) cdrom_lockdoor(drive, 1, &sense); + + return ret; } @@ -1989,20 +2225,21 @@ int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; + struct request_sense sense; if (position) { - int stat = cdrom_lockdoor (drive, 0); + int stat = cdrom_lockdoor(drive, 0, &sense); if (stat) return stat; } - return cdrom_eject(drive, !position); + return cdrom_eject(drive, !position, &sense); } static int ide_cdrom_lock_door (struct cdrom_device_info *cdi, int lock) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; - return cdrom_lockdoor (drive, lock); + return cdrom_lockdoor(drive, lock, NULL); } #undef __ACER50__ @@ -2060,20 +2297,19 @@ { #ifndef __ACER50__ int stat, attempts = 3; - ide_drive_t *drive = (ide_drive_t*) cdi->handle; - struct cdrom_generic_command cgc; struct { char pad[8]; struct atapi_capabilities_page cap; } buf; #else int stat; - ide_drive_t *drive = (ide_drive_t*) cdi->handle; - struct cdrom_generic_command cgc; struct get_capabilities_buf buf; #endif /* __ACER50__ */ + ide_drive_t *drive = (ide_drive_t*) cdi->handle; + struct cdrom_generic_command cgc; + struct request_sense sense; - if ((stat = cdrom_select_speed (drive, speed)) < 0) + if ((stat = cdrom_select_speed (drive, speed, &sense)) < 0) return stat; init_cdrom_command(&cgc, &buf, sizeof(buf)); @@ -2111,19 +2347,18 @@ int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr) { ide_drive_t *drive = (ide_drive_t*) cdi->handle; - struct cdrom_info *info = drive->driver_data; if (slot_nr == CDSL_CURRENT) { - struct request_sense *sense = &info->sense_data; - int stat = cdrom_check_status(drive); - if (stat == 0 || sense->sense_key == UNIT_ATTENTION) + struct request_sense sense; + int stat = cdrom_check_status(drive, &sense); + if (stat == 0 || sense.sense_key == UNIT_ATTENTION) return CDS_DISC_OK; - if (sense->sense_key == NOT_READY && sense->asc == 0x04 && - 
sense->ascq == 0x04) + if (sense.sense_key == NOT_READY && sense.asc == 0x04 && + sense.ascq == 0x04) return CDS_DISC_OK; - if (sense->sense_key == NOT_READY) { + if (sense.sense_key == NOT_READY) { /* ATAPI doesn't have anything that can help us decide whether the drive is really emtpy or the tray is just open. irk. */ @@ -2160,7 +2395,7 @@ ide_drive_t *drive = (ide_drive_t*) cdi->handle; /* get MCN */ - if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof (mcnbuf)))) + if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof(mcnbuf), NULL))) return stat; memcpy (mcn_info->medium_catalog_number, mcnbuf+9, @@ -2184,7 +2419,7 @@ ide_drive_t *drive = (ide_drive_t*) cdi->handle; if (slot_nr == CDSL_CURRENT) { - (void) cdrom_check_status(drive); + (void) cdrom_check_status(drive, NULL); CDROM_STATE_FLAGS (drive)->media_changed = 0; return CDROM_STATE_FLAGS (drive)->media_changed; } else { @@ -2244,8 +2479,13 @@ struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *devinfo = &info->devinfo; int minor = (drive->select.b.unit)<dev = MKDEV (HWIF(drive)->major, minor | CD_PART_MASK); +#else + devinfo->dev = MKDEV (HWIF(drive)->major, minor); +#endif devinfo->ops = &ide_cdrom_dops; devinfo->mask = 0; *(int *)&devinfo->speed = CDROM_STATE_FLAGS (drive)->current_speed; @@ -2270,8 +2510,16 @@ devinfo->mask |= CDC_PLAY_AUDIO; if (!CDROM_CONFIG_FLAGS (drive)->close_tray) devinfo->mask |= CDC_CLOSE_TRAY; - - return register_cdrom (devinfo); + + ret = register_cdrom(devinfo); + +#if defined(CONFIG_CDROM_PACKET) || defined(CONFIG_CDROM_PACKET_MODULE) + if (!ret && CDROM_CONFIG_FLAGS(drive)->cd_rw) + if ((pkt_register(devinfo))) + printk("failed to register packet\n"); +#endif + + return ret; } @@ -2421,7 +2669,7 @@ int major = HWIF(drive)->major; int minor = drive->select.b.unit << PARTN_BITS; - ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL); + ide_add_setting(drive, 
"breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL); ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL); ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL); ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); @@ -2435,7 +2683,7 @@ int minor = drive->select.b.unit << PARTN_BITS; int nslots; - set_device_ro(MKDEV(HWIF(drive)->major, minor), 1); + set_device_ro(MKDEV(HWIF(drive)->major, minor), 0); set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE); drive->special.all = 0; @@ -2581,7 +2829,13 @@ MOD_INC_USE_COUNT; if (info->buffer == NULL) info->buffer = (char *) kmalloc(SECTOR_BUFFER_SIZE, GFP_KERNEL); - rc = cdrom_fops.open (ip, fp); + rc = cdrom_fops.open(ip, fp); +#if defined(CONFIG_CDROM_PACKET) || defined(CONFIG_CDROM_PACKET_MODULE) + if ((rc == 0) && fp->f_mode & FMODE_WRITE) { + if (pkt_open_dev(ip->i_rdev)) + rc = -EROFS; + } +#endif if (rc) { drive->usage--; MOD_DEC_USE_COUNT; @@ -2594,6 +2848,9 @@ ide_drive_t *drive) { cdrom_fops.release (inode, file); +#if defined(CONFIG_CDROM_PACKET) || defined(CONFIG_CDROM_PACKET_MODULE) + pkt_release_dev(inode->i_rdev); +#endif MOD_DEC_USE_COUNT; } @@ -2611,7 +2868,7 @@ struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *devinfo = &info->devinfo; - if (ide_unregister_subdriver (drive)) + if (ide_unregister_subdriver(drive)) return 1; if (info->buffer != NULL) kfree(info->buffer); @@ -2619,7 +2876,10 @@ kfree(info->toc); if (info->changer_info != NULL) kfree(info->changer_info); - if (devinfo->handle == drive && unregister_cdrom (devinfo)) +#if defined(CONFIG_CDROM_PACKET) || defined(CONFIG_CDROM_PACKET_MODULE) + pkt_unregister(devinfo); +#endif + if (devinfo->handle == drive && unregister_cdrom(devinfo)) 
printk ("%s: ide_cdrom_cleanup failed to unregister device from the cdrom driver.\n", drive->name); kfree(info); drive->driver_data = NULL; diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/ide-cd.h linux/drivers/block/ide-cd.h --- linux-2.3.43-6-clean/drivers/block/ide-cd.h Sat Jan 8 00:30:16 2000 +++ linux/drivers/block/ide-cd.h Sun Feb 6 23:18:47 2000 @@ -103,7 +103,7 @@ char *buffer; int buflen; int stat; - struct request_sense *sense_data; + struct request_sense *sense; unsigned char c[12]; }; @@ -626,7 +626,9 @@ "Logical unit not ready - in progress [sic] of becoming ready" }, { 0x020402, "Logical unit not ready - initializing command required" }, { 0x020403, "Logical unit not ready - manual intervention required" }, - { 0x020404, "In process of becoming ready - writing" }, + { 0x020404, "Logical unit not ready - format in progress" }, + { 0x020407, "Logical unit not ready - operation in progress" }, + { 0x020408, "Logical unit not ready - long write in progress" }, { 0x020600, "No reference position found (media may be upside down)" }, { 0x023000, "Incompatible medium installed" }, { 0x023a00, "Medium not present" }, @@ -676,7 +678,6 @@ { 0x04b600, "Media load mechanism failed" }, { 0x051a00, "Parameter list length error" }, { 0x052000, "Invalid command operation code" }, - { 0x052c00, "Command sequence error" }, { 0x052100, "Logical block address out of range" }, { 0x052102, "Invalid address for write" }, { 0x052400, "Invalid field in command packet" }, diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/ide.c linux/drivers/block/ide.c --- linux-2.3.43-6-clean/drivers/block/ide.c Tue Feb 8 22:59:16 2000 +++ linux/drivers/block/ide.c Tue Feb 8 22:16:24 2000 @@ -1095,11 +1095,13 @@ #endif block = rq->sector; blockend = block + rq->nr_sectors; +#if 0 if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) { printk("%s%c: bad access: block=%ld, count=%ld\n", 
drive->name, (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors); goto kill_rq; } +#endif block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0; /* Yecch - this will shift the entire interval, @@ -1417,9 +1419,9 @@ } } else { ide_drive_t *drive = hwgroup->drive; - hwgroup->handler = NULL; if (!drive) { printk("ide_timer_expiry: hwgroup->drive was NULL\n"); + hwgroup->handler = NULL; } else { ide_hwif_t *hwif; ide_startstop_t startstop; @@ -1431,12 +1433,13 @@ /* continue */ if ((wait = expiry(drive)) != 0) { /* reset timer */ - hwgroup->timer.expires = jiffies + wait; + hwgroup->timer.expires = jiffies + wait; add_timer(&hwgroup->timer); spin_unlock_irqrestore(&io_request_lock, flags); return; } } + hwgroup->handler = NULL; /* * We need to simulate a real interrupt when invoking * the handler() function, which means we need to globally diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c --- linux-2.3.43-6-clean/drivers/block/ll_rw_blk.c Tue Feb 8 22:59:16 2000 +++ linux/drivers/block/ll_rw_blk.c Wed Feb 9 23:12:52 2000 @@ -3,6 +3,7 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1994, Karl Keyte: Added support for disk statistics + * Copyright (C) 2000 Jens Axboe : new merge logic */ /* @@ -167,21 +168,41 @@ q->make_request_fn = mfn; } -static int ll_merge_fn(request_queue_t *q, struct request *req, - struct buffer_head *bh) +static int inline ll_merge_bh_fn(struct request *req, struct buffer_head *bh) { - if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) { + if (req->sector + req->nr_sectors == bh->b_rsector) { + req->bhtail->b_reqnext = bh; + req->bhtail = bh; + req->nr_sectors += bh->b_size >> 9; + return 1; + } + if ((req->sector - (bh->b_size >> 9)) == bh->b_rsector) { + bh->b_reqnext = req->bh; + req->bh = bh; + req->buffer = bh->b_data; + req->current_nr_sectors = bh->b_size >> 9; + req->sector = bh->b_rsector; + 
req->nr_sectors += bh->b_size >> 9; + return 1; + } + return 0; +} + +static int inline ll_merge_fn(request_queue_t *q, struct request *req, + struct buffer_head *bh) +{ + if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) { if (req->nr_segments < MAX_SEGMENTS) { - req->nr_segments++; + req->nr_segments++; return 1; } return 0; - } + } return 1; } -static int ll_merge_requests_fn(request_queue_t *q, struct request *req, - struct request *next) +static int inline ll_merge_requests_fn(request_queue_t *q, struct request *req, + struct request *next) { int total_segments = req->nr_segments + next->nr_segments; @@ -197,23 +218,24 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn) { - q->request_fn = rfn; + q->request_fn = rfn; q->current_request = NULL; - q->merge_fn = ll_merge_fn; + q->merge_fn = ll_merge_fn; q->merge_requests_fn = ll_merge_requests_fn; + q->merge_bh_fn = ll_merge_bh_fn; + q->plug_tq.sync = 0; + q->plug_tq.routine = generic_unplug_device; + q->plug_tq.data = q; + q->plugged = 0; q->make_request_fn = NULL; - q->plug_tq.sync = 0; - q->plug_tq.routine = &generic_unplug_device; - q->plug_tq.data = q; - q->plugged = 0; /* * These booleans describe the queue properties. We set the * default (and most common) values here. Other drivers can * use the appropriate functions to alter the queue properties. * as appropriate. 
*/ - q->plug_device_fn = NULL; - q->head_active = 1; + q->plug_device_fn = NULL; + q->head_active = 1; } /* @@ -404,9 +426,9 @@ struct request * tmp; drive_stat_acct(req, req->nr_sectors, 1); - req->next = NULL; if (!(tmp = q->current_request)) { + req->next = NULL; q->current_request = req; return; } @@ -446,9 +468,8 @@ /* * Has to be called with the request spinlock aquired */ -static inline void attempt_merge (request_queue_t * q, - struct request *req, - int max_sectors) +static inline void attempt_merge(request_queue_t *q, struct request *req, + int max_sectors) { struct request *next = req->next; @@ -472,7 +493,7 @@ req->nr_sectors += next->nr_sectors; next->rq_status = RQ_INACTIVE; req->next = next->next; - wake_up (&wait_for_request); + wake_up(&wait_for_request); } static inline void __make_request(request_queue_t * q, int rw, @@ -487,10 +508,6 @@ count = bh->b_size >> 9; sector = bh->b_rsector; - /* It had better not be a new buffer by the time we see it */ - if (buffer_new(bh)) - BUG(); - if (blk_size[major]) { unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1; @@ -612,57 +629,15 @@ continue; if (req->rq_dev != bh->b_rdev) continue; - /* Can we add it to the end of this request? */ - if (req->sector + req->nr_sectors == sector) { - /* - * The merge_fn is a more advanced way - * of accomplishing the same task. Instead - * of applying a fixed limit of some sort - * we instead define a function which can - * determine whether or not it is safe to - * merge the request or not. - * - * See if this queue has rules that - * may suggest that we shouldn't merge - * this - */ - if(!(q->merge_fn)(q, req, bh)) - continue; - req->bhtail->b_reqnext = bh; - req->bhtail = bh; - req->nr_sectors += count; - drive_stat_acct(req, count, 0); - /* Can we now merge this req with the next? */ - attempt_merge(q, req, max_sectors); - /* or to the beginning? 
*/ - } else if (req->sector - count == sector) { - /* - * The merge_fn is a more advanced way - * of accomplishing the same task. Instead - * of applying a fixed limit of some sort - * we instead define a function which can - * determine whether or not it is safe to - * merge the request or not. - * - * See if this queue has rules that - * may suggest that we shouldn't merge - * this - */ - if(!(q->merge_fn)(q, req, bh)) - continue; - bh->b_reqnext = req->bh; - req->bh = bh; - req->buffer = bh->b_data; - req->current_nr_sectors = count; - req->sector = sector; - req->nr_sectors += count; - drive_stat_acct(req, count, 0); - } else + if (!(q->merge_fn(q, req, bh))) + continue; + if (!(q->merge_bh_fn(req, bh))) continue; + drive_stat_acct(req, count, 0); + attempt_merge(q, req, max_sectors); spin_unlock_irqrestore(&io_request_lock,flags); return; - } while ((req = req->next) != NULL); /* find an unused request. */ @@ -711,7 +686,6 @@ req->sem = NULL; req->bh = bh; req->bhtail = bh; - req->next = NULL; __add_request(q, req); spin_unlock_irqrestore(&io_request_lock, flags); return; @@ -858,6 +832,7 @@ bh->b_end_io(bh, uptodate); if ((bh = req->bh) != NULL) { req->current_nr_sectors = bh->b_size >> 9; + req->sector = bh->b_rsector; if (req->nr_sectors < req->current_nr_sectors) { req->nr_sectors = req->current_nr_sectors; printk("end_request: buffer-list destroyed\n"); @@ -888,10 +863,8 @@ } req = all_requests + NR_REQUEST; - while (--req >= all_requests) { + while (--req >= all_requests) req->rq_status = RQ_INACTIVE; - req->next = NULL; - } memset(ro_bits,0,sizeof(ro_bits)); memset(max_readahead, 0, sizeof(max_readahead)); memset(max_sectors, 0, sizeof(max_sectors)); @@ -1002,3 +975,5 @@ EXPORT_SYMBOL(blk_init_queue); EXPORT_SYMBOL(blk_cleanup_queue); EXPORT_SYMBOL(blk_queue_headactive); +EXPORT_SYMBOL(blk_get_queue); +EXPORT_SYMBOL(generic_make_request); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/packet.c 
linux/drivers/block/packet.c --- linux-2.3.43-6-clean/drivers/block/packet.c Thu Jan 1 01:00:00 1970 +++ linux/drivers/block/packet.c Thu Feb 10 04:34:40 2000 @@ -0,0 +1,1083 @@ +/* + * Copyright (C) 2000 Jens Axboe + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices. + * + * + * TODO: (circa order of when I will fix it) + * - do proper LRA for efficiency, if possible (for CD-RW) + * - fix SCSI layer so we can use SCSI CD writers too + * - Only able to write on CD-RW media right now. + * - Generic interface for UDF to submit large packets for variable length + * packet writing. + * - (in correlation with above) interface for UDF <-> packet to negotiate + * a new location when a write fails. + * - handle OPC, especially for -RW media + * - /proc/driver/packet with info + * - Lots of stuff ;) + * + * ---------------------------------- + * 0.01 Jan 19, 2000 -- First test release. + * + *************************************************************************/ + +#define VERSION_CODE "v0.0.1e Jens Axboe " + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "packet.h" + +extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); +extern void end_buffer_io_async(struct buffer_head *bh, int uptodate); + +/* + * 1 for normal debug messages, 2 is very verbose. 0 to turn it off. + */ +#define PACKET_DEBUG 2 + +#define PACKET_MAJOR 42 +#define MAX_WRITERS 4 + +/* + * 64 * 1024 sectors + */ +#define PACKET_MAX_SIZE 64 + +#define NEXT_BH(bh, nbh) \ + (((bh->b_rsector + (bh->b_size >> 9)) == nbh->b_rsector) ?
1 : 0) + +#if PACKET_DEBUG +#define ASSERT(expr) \ + if (!(expr)) { \ + printk("assert failed %s,%s at %d\n", \ + #expr, __FUNCTION__, __LINE__); \ + } +#else +#define ASSERT(expr) +#endif + +#if PACKET_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define DPRINTK(fmt, args...) +#endif + +#if PACKET_DEBUG > 1 +#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define VPRINTK(fmt, args...) +#endif + +static int *packet_sizes; +static int *packet_blksize; +static struct packet_device *packet_devs; + +static inline struct packet_device *pkt_find_dev(kdev_t dev) +{ + int i; + + for (i = 0; i < MAX_WRITERS; i++) + if (packet_devs[i].dev == dev) + return &packet_devs[i]; + + return NULL; +} + +static int pkt_open(struct inode *inode, struct file *file) +{ + struct packet_device *pd; + + if ((pd = pkt_find_dev(inode->i_rdev)) == NULL) + return -ENXIO; + + file->private_data = pd; + + VPRINTK("opened dev %x\n", pd->dev); + MOD_INC_USE_COUNT; + return 0; +} + +static int pkt_close(struct inode *inode, struct file *file) +{ + struct packet_device *pd = (struct packet_device *)file->private_data; + + VPRINTK("closed dev %x\n", pd->dev); + MOD_DEC_USE_COUNT; + return 0; +} + +static int pkt_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct packet_device *pd = (struct packet_device *) file->private_data; + + switch (cmd) { + case PACKET_GET_STATS: { + if (copy_to_user(&arg, &pd->stats, sizeof(struct packet_stats))) + return -EFAULT; + } + default: { + printk("Unknown ioctl for %x\n", pd->dev); + return -ENOTTY; + } + } + return 0; +} + +/* + * write mode select package based on pd->settings + */ +static int pkt_write_settings(struct packet_device *pd) +{ + write_param_page wp; + struct cdrom_generic_command cgc; + int stat; + + memset(&wp, 0, sizeof(write_param_page)); + init_cdrom_command(&cgc, &wp, sizeof(write_param_page)); + + stat = cdrom_mode_sense(pd->cdi, &cgc, 
GPMODE_WRITE_PARMS_PAGE, 0); + if (stat) + return stat; + + wp.fp = pd->settings.fp; + wp.track_mode = pd->settings.track_mode; + wp.write_type = pd->settings.write_type; + wp.data_block_type = pd->settings.block_mode; + + wp.session_format = 0x20; + wp.multi_session = 0; + + /* FIXME: only for FP */ + wp.subhdr2 = 0x20; + wp.packet_size = cpu_to_be32(pd->settings.size >> 2); + + /* some writers are so picky about length ;) */ + cgc.buflen = cgc.cmd[8] = wp.header.desc_length; + if ((stat = cdrom_mode_select(pd->cdi, &cgc))) + return stat; + + /* settings now saved! print info */ + printk("packet: %s writable with %u sized %s packets\n", pd->name, + pd->settings.size >> 2, + pd->settings.fp ? "fixed" : "variable"); + + return 0; +} + +/* + * 0 -- we can write to this track, 1 -- we can't + */ +static int pkt_good_track(track_information *ti) +{ + /* only good for CD-RW at the moment, not DVD-RW */ + + /* FIXME: only for FP */ + if (ti->fp == 0) + return 0; + + if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1) + return 0; + + if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1) + return 0; + + if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1) + return 0; + + printk("packet: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + return 1; +} + +/* + * 0 -- we can write to this disc, 1 -- we can't + */ +static int pkt_good_disc(struct packet_device *pd, disc_information *di) +{ + /* for disc type 0xff we should probably reserve a new track. + * but i'm not sure, should we leave this to user apps? probably. + */ + if (di->disc_type == 0xff) { + printk("packet: unknown disc. 
do you need to create a track?\n"); + return 1; + } + + if (di->disc_type != 0x20 && di->disc_type != 0) { + printk("packet: wrong disc type (%x)\n", di->disc_type); + return 1; + } + + if (di->erasable == 0) { + printk("packet: disc not erasable\n"); + return 1; + } + + if (pd->track_status == PACKET_SESSION_RESERVED) { + printk("packet: can't write to last track\n"); + return 1; + } + + return 0; +} + +static int pkt_probe_settings(struct packet_device *pd) +{ + disc_information di; + track_information ti; + int stat, track; + + memset(&di, 0, sizeof(disc_information)); + memset(&ti, 0, sizeof(track_information)); + + if ((stat = cdrom_get_disc_info(pd->dev, &di))) { + printk("failed get_disc\n"); + return stat; + } + + pd->disc_status = di.disc_status; + pd->track_status = di.border_status; + + if (pkt_good_disc(pd, &di)) + return -ENXIO; + + printk("packet: inserted media is CD-R%s\n", di.erasable ? "W" : ""); + + track = (di.last_track_msb << 8) | di.last_track_lsb; + if ((stat = cdrom_get_track_info(pd->dev, track, 1, &ti))) { + printk("failed get_track\n"); + return stat; + } + + if (pkt_good_track(&ti)) { + printk("packet: can't write to this track\n"); + return -ENXIO; + } + +#if 0 + blk_size[MAJOR(pd->dev)][MINOR(pd->dev)] = ti.track_size >> 1; +#endif + pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; + pd->settings.fp = ti.fp; + + if (ti.nwa_v) { + pd->nwa = be32_to_cpu(ti.next_writable); + pd->flags |= PACKET_NWA_VALID; + } + + if (ti.lra_v) { + pd->lra = be32_to_cpu(ti.last_rec_address); + pd->flags |= PACKET_LRA_VALID; + } else { + pd->lra = 0xffffffff; + pd->flags |= PACKET_LRA_VALID; + } + + /* fine for now */ + pd->settings.link_loss = 0x10; + pd->settings.write_type = 0; /* packet */ + pd->settings.track_mode = ti.track_mode; + if (ti.data_mode == 1) + pd->settings.block_mode = 8; /* Mode 1 */ + else if (ti.data_mode == 2) + pd->settings.block_mode = 10; + else { + printk("packet: unknown data mode (%x)\n", ti.data_mode); + return 1; + 
} + return 0; +} + +/* + * shows requests in the device queue --- just a debugging thing + */ +#if 0 +static void pkt_show_requests(request_queue_t *q, int max) +{ + struct request *rq = q->current_request; + struct buffer_head *bh; + int i = 0; + + for (rq = q->current_request; rq && (i < max); rq = rq->next, i++) { + printk("rq %lu (in %lu) at %lu\n", rq->nr_sectors, + rq->current_nr_sectors, + rq->sector); + bh = rq->bh; + while (bh) { + printk("cluster at %lu (%u)\n", bh->b_rsector, bh->b_size); + bh = bh->b_reqnext; + } + } +} +#endif + +/* + * Add the request queue to the list. Should already be plugged, otherwise + * we wouldn't have gotten the request. + */ +static void pkt_schedule_queue(request_queue_t *q) +{ + q->plugged = 1; + queue_task(&q->plug_tq, &tq_disk); +} + +#define ZONE(sector, span) ((sector) - ((sector) % (span))) + +/* + * the following two are identical to ll_rw_blk defaults right now. to + * get some extra performance, these should be modified to accept + * merges as long as the new request is within the current packet + * boundary. pkt_request() can already deal with holes.
+ */ +static int pkt_merge_fn(request_queue_t *q, struct request *rq, + struct buffer_head *bh) +{ + struct packet_device *pd = pkt_find_dev(rq->rq_dev); + int span = pd->settings.size; + + /* + * dev might not be setup yet, so check span + */ + if (span) { + if (ZONE(rq->sector, span) != ZONE(bh->b_rsector, span)) + return 0; + } + + return pd->q->merge_fn(q, rq, bh); +} + +/* + * rules similar to above + */ +static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq, + struct request *next) +{ + struct packet_device *pd = pkt_find_dev(rq->rq_dev); + int span = pd->settings.size; + + if (span) { + if (ZONE(rq->sector, span) != ZONE(next->sector + next->nr_sectors, span)) + return 0; + } + + return pd->q->merge_requests_fn(q, rq, next); +} + +#define BH_IN_ORDER(b1, b2) ((b1)->b_rsector < (b2)->b_rsector) + +/* + * FIXME: add hole support + */ +static int pkt_merge_bh_fn(struct request *rq, struct buffer_head *bh) +{ + struct packet_device *pd = pkt_find_dev(rq->rq_dev); + struct buffer_head *tmp; + + /* check if block default merge_bh allows it */ + if (pd->q->merge_bh_fn(rq, bh)) + return 1; + + /* holes only supported for writing */ + if (rq->cmd == READ) + return 0; + + /* stuff in front? */ + if (bh->b_rsector < rq->sector) { + bh->b_reqnext = rq->bh; + rq->bh = bh; + rq->sector = bh->b_rsector; + rq->current_nr_sectors = bh->b_size >> 9; + goto out; + } + + /* stuff in back? 
 */ + if (bh->b_rsector > rq->bhtail->b_rsector) { + rq->bhtail->b_reqnext = bh; + rq->bhtail = bh; + goto out; + } + + /* find sweet spot */ + for (tmp = rq->bh; tmp->b_reqnext; tmp = tmp->b_reqnext) + if (BH_IN_ORDER(tmp, bh) && BH_IN_ORDER(bh, tmp->b_reqnext)) + break; + bh->b_reqnext = tmp->b_reqnext; + tmp->b_reqnext = bh; +out: + rq->buffer = rq->bh->b_data; + rq->nr_sectors += bh->b_size >> 9; + rq->nr_segments++; + return 1; +} + +request_queue_t *pkt_get_queue(kdev_t dev) +{ + struct blk_dev_struct *bdev = blk_dev + MAJOR(dev); +#if defined(CONFIG_SMP) + if (!test_bit(0, &io_request_lock)) + printk("queue gotten without lock held!\n"); +#endif + + if (bdev->queue) + return bdev->queue(dev); + else + return &blk_dev[MAJOR(dev)].request_queue; +} + + +/* + * Remap current request to a new location. Needs to "discuss" these + * things with UDF. + */ +void pkt_remap_write(kdev_t dev) +{ + struct buffer_head *bh; + request_queue_t *q; + + /* FIXME: bh should probably be marked BH_Protected so that + * they are kept in the cache until a write is ended. Then + * we can quickly get ahold of them again and resubmit a + * write to a new location + */ + q = pkt_get_queue(dev); + bh = q->current_request->bh; + + /* release buffers */ + while (bh) { + /* + * FIXME: should be 0, but for now we don't want any + * resubmissions + */ + mark_buffer_uptodate(bh, 1); + unlock_buffer(bh); + bh = bh->b_reqnext; + } + q->current_request = q->current_request->next; +} + +/* + * we use this as our default b_end_io handler, since we need to take + * the entire request off the list if just one of the clusters fails. + * later on this should also talk to UDF about relocating blocks -- for + * now we just drop the rq entirely. when doing the relocating we must also + * lock the bh down to ensure that we can easily reconstruct the write should + * it fail.
+ */ +static void pkt_end_io_write(struct buffer_head *bh, int uptodate) +{ + struct packet_device *pd = pkt_find_dev(bh->b_dev); + + if (!uptodate) { + pkt_remap_write(bh->b_dev); + return; + } +#if 0 + if ((bh->b_rsector > pd->lra) + pd->lra = bh->b_rsector; +#endif + VPRINTK("ended bh %lu\n", bh->b_rsector); + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + if (bh->b_dev_id == pkt_end_io_write) { + bh->b_dev_id = NULL; + brelse(bh); + } + pd->stats.bh_e++; +} + +static void pkt_show_stats(kdev_t dev) +{ + struct packet_device *pd = pkt_find_dev(dev); + + printk("BH: started (%lu) ended (%lu)\n", pd->stats.bh_s, pd->stats.bh_e); +} + +/* + * replace b_end_io with our own, so that we can keep bhs in cache for + * the duration of the entire write. + */ +static void pkt_init_bh(struct request *rq) +{ + struct buffer_head *bh; + unsigned cnt = 0; + + bh = rq->bh; + while (bh) { + /* FIXME */ + if (bh->b_end_io == end_buffer_io_async) + panic("woops, page cache...\n"); + if (bh->b_rdev == rq->rq_dev) + bh->b_end_io = pkt_end_io_write; + else + printk("23hj4j123h54j1h4kj32h4kj2\n"); + if (!buffer_uptodate(bh) && bh->b_dev_id != pkt_end_io_write) + printk("that's why?!\n"); + bh = bh->b_reqnext; + cnt += rq->current_nr_sectors; + } + if (cnt != rq->nr_sectors) { + pkt_show_stats(rq->rq_dev); + show_buffers(); + spin_unlock_irq(&io_request_lock); + panic("botched request %u (%lu)\n", cnt, rq->nr_sectors); + } +} + +static void pkt_add_stats(struct packet_device *pd, int read, int written, int bs) +{ + pd->stats.bh_s += (written / bs); + pd->stats.blocks_written += written; + pd->stats.blocks_read += read; +} + +static void pkt_show_bhstring(struct buffer_head *bh) +{ + int i = 0; + + while (bh && i < 128) { + DPRINTK("bh at %lu\n", bh->b_rsector); + bh = bh->b_reqnext; + i++; + } + DPRINTK("%u bhs in all\n", i); +} + +/* + * does request span two packets? 
0 == yes, 1 == no + */ +static inline int pkt_same_zone(struct packet_device *pd, struct request *rq) +{ + int span = pd->settings.size; + + /* debug stuff */ + if (span == 0) + panic("packet size WRONG for pd %s!\n", pd->name); + + if (ZONE(rq->sector, span) != ZONE(rq->sector + rq->nr_sectors-1, span)) + return 0; + + return 1; +} + +/* + * Put request rq into the stored queue + */ +static inline void pkt_insert_rq(request_queue_t *q, struct request *rq) +{ + struct request *foo = (struct request *) q->queuedata; + + if (foo == NULL) { + q->queuedata = rq; + return; + } + + while (foo->next) + foo = foo->next; + + rq->next = foo->next; + foo->next = rq; +} + +/* + * pkt_handle_request is invoked from the driver request functions, + * when they receive a write. Two things can happen from here: + * + * 1) current request fits perfectly with the packet size of the + * device and first sector is packet size aligned. change cmd to + * WRITE_PACKET and requeue. + * 2) current request is incomplete. Unplug device and, queue the request + * for us (to not reenter request function of device). We will then + * gather remaining data and resubmit a complete write. + * + * Assumptions: + * - request will not be delivered that is > that packet size. this + * is handled by the merge function. + * - request will not span two packets. 
this is also handled by the + * merge function (identical to above, actually) + * - io_request_lock spin lock is not held when entered + * - driver can handle plugging + */ +void pkt_handle_request(request_queue_t *q) +{ + struct request *rq; + struct packet_device *pd; + unsigned long nr_sectors, flags; + + spin_lock_irqsave(&io_request_lock, flags); + + rq = q->current_request; + ASSERT(rq); + pd = pkt_find_dev(rq->rq_dev); + rq = q->current_request; + nr_sectors = pd->settings.size; + + if (!pkt_same_zone(pd, rq)) { + printk("rq %lu to %lu\n", rq->sector, rq->nr_sectors); + printk("bh %lu to %lu\n", rq->bh->b_rsector, rq->bhtail->b_rsector); + panic("merging is wrong\n"); + } + + /* case 1: don't you know it, write fits like a glove */ + if (rq->nr_sectors == nr_sectors) { + rq->cmd = WRITE_PACKET; + pkt_init_bh(rq); + pkt_add_stats(pd, 0, nr_sectors, rq->current_nr_sectors); + spin_unlock_irqrestore(&io_request_lock, flags); + return; + } + + /* + * too big. should never happen, since the merge function must + * say no to a merge that will make the request bigger than + * the packet size + */ + if (rq->nr_sectors > nr_sectors) { + printk("packet: request too big (%lu)!\n", rq->nr_sectors); + /* FIXME: need to end it properly */ + q->current_request->rq_status = RQ_INACTIVE; + q->current_request = q->current_request->next; + spin_unlock_irqrestore(&io_request_lock, flags); + return; + } + + /* so we get this far - the rest is case 2 */ + /* reassign to us, to gather data */ + if (q->current_request->next) + pkt_insert_rq(pd->q, q->current_request->next); + q->current_request = NULL; + if (rq->next) + printk("next on is there\n"); + pd->q->current_request = rq; + pd->q->current_request->next = NULL; + pkt_schedule_queue(pd->q); + spin_unlock_irqrestore(&io_request_lock, flags); +} + +/* + * basically just does a ll_rw_block for the bhs given to use, but we + * don't return until we have them. 
+ */ +static void pkt_read_bhlist(struct packet_device *pd, + struct buffer_head *bhlist[], int count) +{ + struct buffer_head *bh; + int i; + + ll_rw_block(READ, count, bhlist); + for (i = 0; i < count; i++) { + bh = bhlist[i]; + /* zero blocks that haven't been written yet */ +#if 0 + if (pd->flags & PACKET_LRA_VALID && (bh->b_rsector < pd->lra)) { + memset(bh->b_data, 0, bh->b_size); + continue; + } +#endif + if (!(buffer_uptodate(bh))) { + VPRINTK("waiting on buffer %lu\n", bh->b_rsector); + wait_on_buffer(bh); + } + } +} + +/* pkt_request is our internal request function. It gets invoked + * whenever we have scheduled a request for ourselves and gathers data + * for case 2 and bottom half of case 3 (listed above). + * + * with our own merge function, there can essentially be holes in + * a request so cover that case as well. as long as all bh in the + * request cluster are of the same size (they damn well better be!), + * we are fine. + * + * Locks: io_request_lock held when entered + */ +static void pkt_request(request_queue_t *q) +{ + request_queue_t *wq; + struct packet_device *pd; + struct buffer_head *bh, *bhlist[PACKET_MAX_SIZE]; + struct request *rq; + unsigned long start_s, end_s, sector; + int i, sectors, cnt = 0, ss, bs; + + rq = q->current_request; + if (MAJOR(rq->rq_dev) == PACKET_MAJOR) { + printk("packet: direct requests not supported!\n"); + if (end_that_request_first(rq, 0, "packet")) + end_that_request_last(rq); + return; + } + + pd = pkt_find_dev(rq->rq_dev); + ASSERT(pd); + + /* + * all calculations are done with 512 byte sectors. block size is + * set to 2048 for all CD-ROM's, but I still get the occasional + * 1024 cluster... 
+ */ + sectors = pd->settings.size - rq->nr_sectors; + start_s = rq->sector - (rq->sector % pd->settings.size); + end_s = start_s + pd->settings.size; + + /* + * ss == sectors per bh, bs == size of bh + */ + ss = rq->current_nr_sectors; + bs = rq->bh->b_size; + + VPRINTK("need %d sectors for %s\n", sectors, kdevname(rq->rq_dev)); + VPRINTK("from %lu to %lu (%lu - %lu)\n", start_s, end_s, rq->bh->b_rsector, rq->bhtail->b_rsector + rq->current_nr_sectors); + + /* got front hole, if any */ + spin_unlock_irq(&io_request_lock); + sector = rq->sector; + if (sector > start_s) { + cnt = (sector - start_s) / ss; + sector = start_s; +#if PACKET_DEBUG > 1 + DPRINTK("get %u bhs at front (%lu)\n", cnt, sector); +#endif + for (i = 0; i < cnt; i++) { + bhlist[i] = getblk(rq->rq_dev, sector / ss, bs); + bhlist[i]->b_dev_id = pkt_end_io_write; + rq->nr_sectors += ss; + rq->nr_segments++; + sector += ss; + } + VPRINTK("block now %lu (%lu)\n", sector, sector / ss); + pkt_read_bhlist(pd, bhlist, cnt); + /* link them (pkt_read_bhlist screws them up) */ + for (i = 0; i < cnt; i++) + bhlist[i]->b_reqnext = bhlist[i + 1]; + bhlist[cnt - 1]->b_reqnext = rq->bh; + bh = rq->bh; + rq->bh = bhlist[0]; + rq->buffer = rq->bh->b_data; +#if PACKET_DEBUG > 1 + pkt_show_bhstring(rq->bh); +#endif + } else { + bh = rq->bh; + } + + + /* walk the rest and get the chunks we are missing */ + VPRINTK("traversing rest of list %lu %lu\n", sector, sector / ss); +#if PACKET_DEBUG > 1 + pkt_show_bhstring(rq->bh); +#endif + for (sector += ss; sector < end_s; sector += ss) { + struct buffer_head *foo_bh; + /* next bh is in extension of this one */ + if (bh->b_reqnext) { + if (NEXT_BH(bh, bh->b_reqnext)) { + bh = bh->b_reqnext; + VPRINTK("hole at %lu\n", bh->b_rsector); + continue; + } + } + foo_bh = getblk(rq->rq_dev, sector / ss, bs); + foo_bh->b_dev_id = pkt_end_io_write; + pkt_read_bhlist(pd, &foo_bh, 1); + VPRINTK("got block %lu\n", foo_bh->b_rsector); + if (bh->b_reqnext) + foo_bh->b_reqnext = bh->b_reqnext; 
+ bh->b_reqnext = foo_bh; + bh = foo_bh; + rq->nr_sectors += ss; + rq->nr_segments++; + } +#if PACKET_DEBUG > 1 +// pkt_show_bhstring(rq->bh); +#endif + bh->b_reqnext = NULL; + rq->bhtail = bh; + rq->buffer = rq->bh->b_data; + VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector); + + rq->sector = start_s; + rq->cmd = WRITE_PACKET; + spin_lock_irq(&io_request_lock); + pkt_init_bh(rq); + pkt_add_stats(pd, sectors, rq->nr_sectors, rq->current_nr_sectors); + + /* sanity check */ + if (rq->nr_sectors != pd->settings.size) { + pkt_show_bhstring(rq->bh); + panic("packet: request mismatch %lu (should be %u)\n", + rq->nr_sectors, + pd->settings.size); + } + + /* plug real device again and submit this write */ + ASSERT(rq->next == NULL); + wq = pkt_get_queue(rq->rq_dev); + rq->next = wq->current_request; + q->current_request = NULL; + wq->current_request = rq; + wq->current_request->next = (struct request *) pd->q->queuedata; + pd->q->queuedata = NULL; + /* invoke request function for device */ + pkt_schedule_queue(wq); +} + +/* + * speed is given as the normal factor, e.g. 4 for 4x + */ +int pkt_set_write_speed(struct packet_device *pd, int speed) +{ + struct cdrom_generic_command cgc; + struct cdrom_device_info *cdi = pd->cdi; + + init_cdrom_command(&cgc, NULL, 0); + cgc.cmd[0] = 0xbb; + + /* + * we set read and write time to the same. although a drive + * can typically read much faster than write, this minimizes + * the spin up/down when we write and gather data. if the + * device is opened read only, we never get to here. + */ + cgc.cmd[2] = cgc.cmd[4] = ((0xb0 * speed) >> 8) & 0xff; + cgc.cmd[3] = cgc.cmd[5] = (0xb0 * speed) & 0xff; + + return cdi->ops->generic_packet(cdi, &cgc); +} + +/* + * called from low level drivers at open time. return 1 if the device + * can only be opened read-only. 
+ */ +int pkt_open_dev(kdev_t dev) +{ + struct packet_device *pd = pkt_find_dev(dev); + int ret; + + if ((ret = pkt_probe_settings(pd))) { + DPRINTK("packet: %s failed probe\n", pd->name); + return ret; + } + + if ((ret = pkt_write_settings(pd))) { + DPRINTK("packet: %s failed saving write settings\n", pd->name); + return ret; + } + + /* FIXME: probe drives that can go faster than 4x */ + if ((ret = pkt_set_write_speed(pd, 4))) { + DPRINTK("packet: %s couldn't set write speed\n", pd->name); + return ret; + } + return 0; +} + +static void pkt_show_bhinfo(struct buffer_head *bh) +{ + char *list[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", "PROTECTED" }; + printk("%s: count %d state %lx\n", list[bh->b_list], atomic_read(&bh->b_count), bh->b_state); +} + +/* + * flush the drive cache to media + */ +static int pkt_flush_cache(struct packet_device *pd) +{ + struct cdrom_generic_command cgc; + struct cdrom_device_info *cdi = pd->cdi; + + init_cdrom_command(&cgc, NULL, 0); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; +#if 0 + cgc.cmd[1] = 1 << 1; +#endif + return cdi->ops->generic_packet(cdi, &cgc); +} + +/* + * called from low level drivers when the device is closed. 
+ */ +void pkt_release_dev(kdev_t dev) +{ + struct packet_device *pd = pkt_find_dev(dev); + + if (pkt_flush_cache(pd)) + DPRINTK("packet: %s not flushing cache\n", pd->name); +} + +/* + * Remove a previously registered device + */ +int pkt_unregister(struct cdrom_device_info *cdi) +{ + struct packet_device *pd; + unsigned long flags; + request_queue_t *q; + + if ((pd = pkt_find_dev(cdi->dev)) == NULL) + return 0; + + /* restore old queue */ + spin_lock_irqsave(&io_request_lock, flags); + q = pkt_get_queue(pd->dev); + q->merge_fn = pd->q->merge_fn; + q->merge_requests_fn = pd->q->merge_requests_fn; + q->merge_bh_fn = pd->q->merge_bh_fn; + spin_unlock_irqrestore(&io_request_lock, flags); + + pd->dev = 0; + pd->cdi = NULL; + kfree(pd->q); + DPRINTK("packet: writer %s unregistered\n", cdi->name); + MOD_DEC_USE_COUNT; + return 1; +} + +/* + * Called at init time from CD-ROM drivers that want to use packet writing. + */ +int pkt_register(struct cdrom_device_info *cdi) +{ + struct packet_device *pd; + unsigned long flags; + request_queue_t *q; + int i, ret = 0; + + MOD_INC_USE_COUNT; + + if (MAJOR(cdi->dev) == SCSI_CDROM_MAJOR) { + printk("packet: sorry, only ATAPI is supported at this time\n"); + ret = -EOPNOTSUPP; + goto out; + } + + for (i = 0; i < MAX_WRITERS; i++) + if (!packet_devs[i].dev) + break; + + if (i == MAX_WRITERS) { + printk("packet: max %d writers supported\n", MAX_WRITERS); + ret = -ENOSYS; + goto out; + } + + pd = &packet_devs[i]; + memset(pd, 0, sizeof(struct packet_device)); + if ((pd->q = kmalloc(sizeof(request_queue_t), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto out; + } + + blk_init_queue(pd->q, pkt_request); + pd->q->queuedata = NULL; + set_blocksize(cdi->dev, CD_FRAMESIZE); + pd->cdi = cdi; + pd->dev = cdi->dev; + strncpy(pd->name, cdi->name, sizeof(pd->name) - 1); + spin_lock_init(&pd->lock); + + /* + * replace merge functions with our own. 
we allow holes in requests, + since that gives us a better chance of doing optimized writes + */ + spin_lock_irqsave(&io_request_lock, flags); + q = pkt_get_queue(cdi->dev); + pd->q->merge_fn = q->merge_fn; + pd->q->merge_requests_fn = q->merge_requests_fn; + pd->q->merge_bh_fn = q->merge_bh_fn; + q->merge_fn = pkt_merge_fn; + q->merge_requests_fn = pkt_merge_requests_fn; + q->merge_bh_fn = pkt_merge_bh_fn; + spin_unlock_irqrestore(&io_request_lock, flags); + + DPRINTK("packet: writer %s successfully registered\n", pd->name); + return 0; +out: + MOD_DEC_USE_COUNT; + return ret; +} + +/* open and release is just for handling ioctls */ +static struct block_device_operations packet_ops = { + open: pkt_open, + release: pkt_close, + ioctl: pkt_ioctl, +}; + +int pkt_init(void) +{ + int ret; + + if (register_blkdev(PACKET_MAJOR, "packet", &packet_ops)) { + printk("unable to register packet device\n"); + return -EIO; + } + ret = -ENOMEM; + packet_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL); + if (packet_sizes == NULL) + goto out; + + packet_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL); + if (packet_blksize == NULL) + goto out; + + packet_devs = kmalloc(MAX_WRITERS * sizeof(struct packet_device), GFP_KERNEL); + if (packet_devs == NULL) + goto out; + + memset(packet_devs, 0, MAX_WRITERS * sizeof(struct packet_device)); + memset(packet_sizes, 0, MAX_WRITERS * sizeof(int)); + memset(packet_blksize, 0, MAX_WRITERS * sizeof(int)); + + blk_size[PACKET_MAJOR] = packet_sizes; + blksize_size[PACKET_MAJOR] = packet_blksize; + set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE); + + DPRINTK("packet: %s\n", VERSION_CODE); + return 0; + +out: + unregister_blkdev(PACKET_MAJOR, "packet"); + if (packet_devs) + kfree(packet_devs); + if (packet_sizes) + kfree(packet_sizes); + if (packet_blksize) + kfree(packet_blksize); + return ret; +} + +void __exit pkt_exit(void) +{ + unregister_blkdev(PACKET_MAJOR, "packet"); + kfree(packet_sizes); + kfree(packet_blksize); + 
kfree(packet_devs); +} + +EXPORT_SYMBOL(pkt_register); +EXPORT_SYMBOL(pkt_unregister); +EXPORT_SYMBOL(pkt_handle_request); +EXPORT_SYMBOL(pkt_open_dev); +EXPORT_SYMBOL(pkt_release_dev); + +module_init(pkt_init); +module_exit(pkt_exit); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/packet.h linux/drivers/block/packet.h --- linux-2.3.43-6-clean/drivers/block/packet.h Thu Jan 1 01:00:00 1970 +++ linux/drivers/block/packet.h Thu Feb 10 02:00:46 2000 @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2000 Jens Axboe + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices. + * + */ + +#ifndef PACKET_H +#define PACKET_H + +#include + +/* + * device types + */ +#define PACKET_CDR 1 +#define PACKET_CDRW 2 +#define PACKET_DVDR 3 +#define PACKET_DVDRW 4 + +/* + * flags + */ +#define PACKET_WRITEABLE 1 +#define PACKET_NWA_VALID 2 +#define PACKET_LRA_VALID 4 + +/* + * Disc status -- from READ_DISC_INFO + */ +#define PACKET_DISC_EMPTY 0 +#define PACKET_DISC_INCOMPLETE 1 +#define PACKET_DISC_COMPLETE 2 +#define PACKET_DISC_OTHER 3 + +/* + * Last session/border status + */ +#define PACKET_SESSION_EMPTY 0 +#define PACKET_SESSION_INCOMPLETE 1 +#define PACKET_SESSION_RESERVED 2 +#define PACKET_SESSION_COMPLETE 3 + +struct packet_stats { + unsigned long bh_s; + unsigned long bh_e; + unsigned long blocks_written; + unsigned long blocks_read; +}; + +/* + * packet ioctls + */ +#define PACKET_GET_STATS _IOR('X', 0, struct packet_stats) + +#ifdef __KERNEL__ +extern int pkt_unregister(struct cdrom_device_info *cdi); +extern int pkt_register(struct cdrom_device_info *cdi); +extern void pkt_handle_request(request_queue_t *queue); +extern int pkt_open_dev(kdev_t dev); +extern void pkt_release_dev(kdev_t dev); + +struct packet_settings { + __u8 size; /* packet size in frames */ + __u8 fp; /* fixed 
packets */ + __u8 link_loss; /* the rest is specified + * as per Mt Fuji */ + __u8 write_type; + __u8 track_mode; + __u8 block_mode; +}; + +struct packet_device { + kdev_t dev; + char name[6]; + struct cdrom_device_info *cdi; + request_queue_t *q; + merge_request_fn *merge_fn; + merge_requests_fn *merge_requests_fn; + struct packet_settings settings; + struct packet_stats stats; + __u8 type; + __u32 flags; + __u8 disc_status; + __u8 track_status; /* last one */ + __u32 nwa; /* next writable address */ + __u32 lra; /* last recorded address */ + spinlock_t lock; +}; +#endif /* __KERNEL__ */ + +#endif /* PACKET_H */ diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/block/rd.c linux/drivers/block/rd.c --- linux-2.3.43-6-clean/drivers/block/rd.c Thu Feb 10 04:38:07 2000 +++ linux/drivers/block/rd.c Thu Feb 10 01:33:56 2000 @@ -290,7 +290,10 @@ switch (cmd) { case BLKFLSBUF: if (!capable(CAP_SYS_ADMIN)) return -EACCES; - invalidate_buffers(inode->i_rdev); + /* special: we want to release the ramdisk memory, + it's not like with the other blockdevices where + this ioctl only flushes away the buffer cache. */ + destroy_buffers(inode->i_rdev); break; case BLKGETSIZE: /* Return device size */ @@ -382,7 +385,7 @@ int i; for (i = 0 ; i < NUM_RAMDISKS; i++) - invalidate_buffers(MKDEV(MAJOR_NR, i)); + destroy_buffers(MKDEV(MAJOR_NR, i)); unregister_blkdev( MAJOR_NR, "ramdisk" ); blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c --- linux-2.3.43-6-clean/drivers/cdrom/cdrom.c Tue Feb 1 15:44:54 2000 +++ linux/drivers/cdrom/cdrom.c Thu Feb 3 19:06:50 2000 @@ -187,14 +187,23 @@ -- Fixed CDDA ripping with cdda2wav - accept much larger requests of number of frames and split the reads in blocks of 8. - 3.05 Dec 13, 1999 - Jens Axboe + 3.06 Dec 13, 1999 - Jens Axboe -- Added support for changing the region of DVD drives. 
-- Added sense data to generic command. + + 3.07 Feb 2, 2000 - Jens Axboe + -- Do same "read header length" trick in cdrom_get_disc_info() as + we do in cdrom_get_track_info() -- some drives don't obey specs and + fail if they can't supply the full Mt Fuji size table. + -- Deleted stuff related to setting up write modes. It has a different + home now. + -- Clear header length in mode_select unconditionally. + -- Removed the register_disk() that was added, not needed here. -------------------------------------------------------------------------*/ -#define REVISION "Revision: 3.06" -#define VERSION "Id: cdrom.c 3.06 1999/12/13" +#define REVISION "Revision: 3.07" +#define VERSION "Id: cdrom.c 3.07 2000/02/02" /* I use an error-log mask to give fine grain control over the type of messages dumped to the system logs. The available masks include: */ @@ -1285,7 +1294,7 @@ struct cdrom_device_ops *cdo = cdi->ops; memset(cgc->cmd, 0, sizeof(cgc->cmd)); - + memset(cgc->buffer, 0, 2); cgc->cmd[0] = GPCMD_MODE_SELECT_10; cgc->cmd[1] = 0x10; /* PF */ cgc->cmd[7] = cgc->buflen >> 8; @@ -1960,9 +1969,6 @@ buffer[offset+13] = volctrl.channel2 & mask[offset+13]; buffer[offset+15] = volctrl.channel3 & mask[offset+15]; - /* clear the first three */ - memset(buffer, 0, 3); - /* set volume */ cgc.buffer = buffer; return cdrom_mode_select(cdi, &cgc); @@ -2061,7 +2067,7 @@ if (copy && !ret) __copy_to_user(userbuf, cgc.buffer, cgc.buflen); /* copy back sense data */ - if (ret && sense != NULL) + if (sense != NULL) if (copy_to_user(sense, cgc.sense, sizeof(struct request_sense))) ret = -EFAULT; kfree(cgc.buffer); @@ -2116,12 +2122,26 @@ struct cdrom_device_info *cdi = cdrom_find_device(dev); struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_generic_command cgc; + int ret; /* set up command and get the disc info */ init_cdrom_command(&cgc, di, sizeof(*di)); cgc.cmd[0] = GPCMD_READ_DISC_INFO; - cgc.cmd[8] = cgc.buflen; + cgc.cmd[8] = cgc.buflen = 2; + + if ((ret = 
cdo->generic_packet(cdi, &cgc))) + return ret; + + /* not all drives have the same disc_info length, so requeue + * packet with the length the drive tells us it can supply + */ + cgc.buflen = be16_to_cpu(di->disc_information_length) + + sizeof(di->disc_information_length); + + if (cgc.buflen > sizeof(disc_information)) + cgc.buflen = sizeof(disc_information); + cgc.cmd[8] = cgc.buflen; return cdo->generic_packet(cdi, &cgc); } diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/fs/block_dev.c linux/fs/block_dev.c --- linux-2.3.43-6-clean/fs/block_dev.c Tue Feb 1 15:44:59 2000 +++ linux/fs/block_dev.c Fri Feb 4 18:20:12 2000 @@ -70,39 +70,51 @@ if (chars != blocksize) fn = bread; bh = fn(dev, block, blocksize); + if (!bh) + return written ? written : -EIO; + if (!buffer_uptodate(bh)) + wait_on_buffer(bh); } #else bh = getblk(dev, block, blocksize); + if (!bh) + return written ? written : -EIO; - if (chars != blocksize && !buffer_uptodate(bh)) { - if(!filp->f_reada || - !read_ahead[MAJOR(dev)]) { - /* We do this to force the read of a single buffer */ - brelse(bh); - bh = bread(dev,block,blocksize); - } else { - /* Read-ahead before write */ - blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2; - if (block + blocks > size) blocks = size - block; - if (blocks > NBUF) blocks=NBUF; + if (!buffer_uptodate(bh)) + { + if (chars == blocksize) + wait_on_buffer(bh); + else + { bhlist[0] = bh; - for(i=1; i= 0) brelse(bhlist[i--]); - return written ? written : -EIO; - }; - }; + if (!filp->f_reada || !read_ahead[MAJOR(dev)]) { + /* We do this to force the read of a single buffer */ + blocks = 1; + } else { + /* Read-ahead before write */ + blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2; + if (block + blocks > size) blocks = size - block; + if (blocks > NBUF) blocks=NBUF; + for(i=1; i= 0) brelse(bhlist[i--]); + return written ? 
written : -EIO; + } + } + } ll_rw_block(READ, blocks, bhlist); for(i=1; ib_data; offset = 0; *ppos += chars; @@ -249,7 +261,8 @@ do { /* Finish off all I/O that has actually completed */ if (*bhe) { - wait_on_buffer(*bhe); + if (!buffer_uptodate(*bhe)) + wait_on_buffer(*bhe); if (!buffer_uptodate(*bhe)) { /* read error? */ brelse(*bhe); if (++bhe == &buflist[NBUF]) @@ -522,7 +535,7 @@ if (sb && invalidate_inodes(sb)) printk("VFS: busy inodes on changed media.\n"); - invalidate_buffers(dev); + destroy_buffers(dev); if (bdops->revalidate) bdops->revalidate(dev); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/fs/buffer.c linux/fs/buffer.c --- linux-2.3.43-6-clean/fs/buffer.c Thu Feb 10 04:38:07 2000 +++ linux/fs/buffer.c Thu Feb 10 01:33:56 2000 @@ -94,6 +94,7 @@ kmem_cache_t *bh_cachep; static int grow_buffers(int size); +static void __refile_buffer(struct buffer_head *); /* This is used by some architectures to estimate available memory. */ atomic_t buffermem_pages = ATOMIC_INIT(0); @@ -131,6 +132,30 @@ int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,600*HZ, 6000*HZ, 6000*HZ, 2047, 5}; /* + * Buffer cache locking - note that interrupts may only unlock, not + * lock buffers. + */ +extern void wait_on_buffer_timer(unsigned long data) +{ + struct buffer_head *bh = (struct buffer_head *) data; + + printk("timeout: %s for bh %lu %lx %d %d\n", kdevname(bh->b_dev), bh->b_rsector, bh->b_state, atomic_read(&bh->b_count), bh->b_list); + show_buffers(); +} + +extern void wait_on_buffer(struct buffer_head * bh) +{ + if (test_bit(BH_Lock, &bh->b_state)) { + init_timer(&bh->timer); + bh->timer.expires = 10*HZ + jiffies; + bh->timer.data = (unsigned long) bh; + bh->timer.function = wait_on_buffer_timer; + add_timer(&bh->timer); + __wait_on_buffer(bh); + del_timer(&bh->timer); + } +} +/* * Rewrote the wait-routines to use the "new" wait-queue functionality, * and getting rid of the cli-sti pairs. 
The wait-queue routines still * need cli-sti, but now it's just a couple of 386 instructions or so. @@ -277,11 +302,14 @@ void sync_dev(kdev_t dev) { - sync_buffers(dev, 0); sync_supers(dev); sync_inodes(dev); - sync_buffers(dev, 0); DQUOT_SYNC(dev); + /* sync all the dirty buffers out to disk only _after_ all the + high level layers finished generating buffer dirty data + (or we'll return with some buffer still dirty on the blockdevice + so breaking the semantics of this call) */ + sync_buffers(dev, 0); /* * FIXME(eric) we need to sync the physical devices here. * This is because some (scsi) controllers have huge amounts of @@ -412,40 +440,6 @@ return err; } -void invalidate_buffers(kdev_t dev) -{ - int nlist; - - spin_lock(&lru_list_lock); - for(nlist = 0; nlist < NR_LIST; nlist++) { - struct buffer_head * bh; - int i; - retry: - bh = lru_list[nlist]; - if (!bh) - continue; - for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bh->b_next_free) { - if (bh->b_dev != dev) - continue; - if (buffer_locked(bh)) { - atomic_inc(&bh->b_count); - spin_unlock(&lru_list_lock); - wait_on_buffer(bh); - spin_lock(&lru_list_lock); - atomic_dec(&bh->b_count); - goto retry; - } - if (atomic_read(&bh->b_count)) - continue; - clear_bit(BH_Protected, &bh->b_state); - clear_bit(BH_Uptodate, &bh->b_state); - clear_bit(BH_Dirty, &bh->b_state); - clear_bit(BH_Req, &bh->b_state); - } - } - spin_unlock(&lru_list_lock); -} - /* After several hours of tedious analysis, the following hash * function won. Do not mess with it... 
-DaveM */ @@ -464,10 +458,13 @@ static __inline__ void __hash_unlink(struct buffer_head *bh) { - if (bh->b_next) - bh->b_next->b_pprev = bh->b_pprev; - *(bh->b_pprev) = bh->b_next; - bh->b_pprev = NULL; + if (bh->b_pprev) + { + if (bh->b_next) + bh->b_next->b_pprev = bh->b_pprev; + *(bh->b_pprev) = bh->b_next; + bh->b_pprev = NULL; + } } static void __insert_into_lru_list(struct buffer_head * bh, int blist) @@ -514,17 +511,12 @@ bh->b_next_free = bh->b_prev_free = NULL; } -/* The following two functions must operate atomically - * because they control the visibility of a buffer head - * to the rest of the kernel. - */ -static __inline__ void __remove_from_queues(struct buffer_head *bh) +/* must be called with both the hash_table_lock and the lru_list_lock + held */ +static void __remove_from_queues(struct buffer_head *bh) { - write_lock(&hash_table_lock); - if (bh->b_pprev) - __hash_unlink(bh); + __hash_unlink(bh); __remove_from_lru_list(bh, bh->b_list); - write_unlock(&hash_table_lock); } static void insert_into_queues(struct buffer_head *bh) @@ -547,6 +539,8 @@ struct bh_free_head *head = &free_list[BUFSIZE_INDEX(bh->b_size)]; struct buffer_head **bhp = &head->list; + bh->b_state = 0; + spin_lock(&head->lock); bh->b_dev = B_FREE; if(!*bhp) { @@ -604,11 +598,75 @@ return 0; } +/* If invalidate_buffers() will trash dirty buffers, it means some kind + of fs corruption is going on. Trashing dirty data always imply losing + information that was supposed to be just stored on the physical layer + by the user. + + Thus invalidate_buffers in general usage is not allowed to trash dirty + buffers. For example ioctl(BLKFLSBUF) expects dirty data to be preserved. 
+ + NOTE: In the case where the user removed a removable-media-disk even if + there's still dirty data not synced on disk (due to a bug in the device driver + or due to an error of the user), by not destroying the dirty buffers we could + generate corruption also on the next media inserted, thus a parameter is + necessary to handle this case in the most safe way possible (trying + to not corrupt also the new disk inserted with the data belonging to + the old now corrupted disk). Also for the ramdisk the natural thing + to do in order to release the ramdisk memory is to destroy dirty buffers. + + These are two special cases. Normal usage implies the device driver + to issue a sync on the device (without waiting for I/O completion) and + then an invalidate_buffers call that doesn't trash dirty buffers. */ +void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers) +{ + int i, nlist, slept; + struct buffer_head * bh, * bh_next; + + retry: + slept = 0; + spin_lock(&lru_list_lock); + for(nlist = 0; nlist < NR_LIST; nlist++) { + bh = lru_list[nlist]; + if (!bh) + continue; + for (i = nr_buffers_type[nlist]; i > 0 ; bh = bh_next, i--) + { + bh_next = bh->b_next_free; + if (bh->b_dev != dev) + continue; + if (buffer_locked(bh)) { + atomic_inc(&bh->b_count); + spin_unlock(&lru_list_lock); + wait_on_buffer(bh); + slept = 1; + spin_lock(&lru_list_lock); + atomic_dec(&bh->b_count); + } + + write_lock(&hash_table_lock); + if (!atomic_read(&bh->b_count) && + (destroy_dirty_buffers || !buffer_dirty(bh))) + { + __remove_from_queues(bh); + put_last_free(bh); + } + write_unlock(&hash_table_lock); + if (slept) + goto out; + } + } +out: + spin_unlock(&lru_list_lock); + if (slept) + goto retry; +} + void set_blocksize(kdev_t dev, int size) { extern int *blksize_size[]; - int i, nlist; - struct buffer_head * bh, *bhnext; + int i, nlist, slept; + struct buffer_head * bh, * bh_next; if (!blksize_size[MAJOR(dev)]) return; @@ -626,41 +684,57 @@ sync_buffers(dev, 2); 
blksize_size[MAJOR(dev)][MINOR(dev)] = size; - /* We need to be quite careful how we do this - we are moving entries - * around on the free list, and we can get in a loop if we are not careful. - */ + retry: + slept = 0; + spin_lock(&lru_list_lock); for(nlist = 0; nlist < NR_LIST; nlist++) { - repeat: - spin_lock(&lru_list_lock); bh = lru_list[nlist]; - for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) { - if(!bh) - break; - - bhnext = bh->b_next_free; - if (bh->b_dev != dev) - continue; - if (bh->b_size == size) - continue; + if (!bh) + continue; + for (i = nr_buffers_type[nlist]; i > 0 ; bh = bh_next, i--) + { + bh_next = bh->b_next_free; + if (bh->b_dev != dev || bh->b_size == size) + continue; if (buffer_locked(bh)) { atomic_inc(&bh->b_count); spin_unlock(&lru_list_lock); wait_on_buffer(bh); + slept = 1; + spin_lock(&lru_list_lock); atomic_dec(&bh->b_count); - goto repeat; - } - if (bh->b_dev == dev && bh->b_size != size) { - clear_bit(BH_Dirty, &bh->b_state); - clear_bit(BH_Uptodate, &bh->b_state); - clear_bit(BH_Req, &bh->b_state); } - if (atomic_read(&bh->b_count) == 0) { + + write_lock(&hash_table_lock); + if (!atomic_read(&bh->b_count)) + { + if (buffer_dirty(bh)) + printk(KERN_WARNING + "set_blocksize: dev %s buffer_dirty %lu size %hu\n", + kdevname(dev), bh->b_blocknr, bh->b_size); __remove_from_queues(bh); put_last_free(bh); } + else + { + if (atomic_set_buffer_clean(bh)) + __refile_buffer(bh); + clear_bit(BH_Uptodate, &bh->b_state); + printk(KERN_WARNING + "set_blocksize: " + "b_count %d, dev %s, block %lu, from %p\n", + atomic_read(&bh->b_count), bdevname(bh->b_dev), + bh->b_blocknr, __builtin_return_address(0)); + } + write_unlock(&hash_table_lock); + if (slept) + goto out; } - spin_unlock(&lru_list_lock); } + out: + spin_unlock(&lru_list_lock); + if (slept) + goto retry; } /* @@ -682,7 +756,7 @@ bh->b_dev_id = dev_id; } -static void end_buffer_io_sync(struct buffer_head *bh, int uptodate) +void end_buffer_io_sync(struct buffer_head *bh, int 
uptodate) { mark_buffer_uptodate(bh, uptodate); unlock_buffer(bh); @@ -695,7 +769,7 @@ BUG(); } -static void end_buffer_io_async(struct buffer_head * bh, int uptodate) +void end_buffer_io_async(struct buffer_head * bh, int uptodate) { static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; @@ -785,30 +859,29 @@ atomic_set(&bh->b_count, 1); } spin_unlock(&free_list[isize].lock); - if (!bh) - goto refill; - - /* OK, FINALLY we know that this buffer is the only one of its kind, - * we hold a reference (b_count>0), it is unlocked, and it is clean. - */ - init_buffer(bh, end_buffer_io_sync, NULL); - bh->b_dev = dev; - bh->b_blocknr = block; - bh->b_state = 1 << BH_Mapped; + if (bh) + { + /* OK, FINALLY we know that this buffer is the only one of + its kind, we hold a reference (b_count>0), it is unlocked, + and it is clean. */ + init_buffer(bh, end_buffer_io_sync, NULL); + bh->b_dev = dev; + bh->b_blocknr = block; + bh->b_state = 1 << BH_Mapped; - /* Insert the buffer into the regular lists */ - insert_into_queues(bh); - goto out; + /* Insert the buffer into the regular lists */ + insert_into_queues(bh); + out: + touch_buffer(bh); + return bh; + } /* * If we block while refilling the free list, somebody may * create the buffer first ... search the hashes again. */ -refill: refill_freelist(size); goto repeat; -out: - return bh; } /* -1 -> no need to flush @@ -848,23 +921,38 @@ wakeup_bdflush(state); } -static inline void __mark_dirty(struct buffer_head *bh, int flag) +#define set_bh_age(bh, flag) \ +do { \ + (bh)->b_flushtime = jiffies + \ + ((flag) ? bdf_prm.b_un.age_super : \ + bdf_prm.b_un.age_buffer); \ +} while(0) + +static __inline__ void __mark_dirty(struct buffer_head *bh, int flag) { - bh->b_flushtime = jiffies + (flag ? 
bdf_prm.b_un.age_super : bdf_prm.b_un.age_buffer); - clear_bit(BH_New, &bh->b_state); + set_bh_age(bh, flag); refile_buffer(bh); } +/* atomic version, the user must call balance_dirty() by hand + as soon as it become possible to block */ void __mark_buffer_dirty(struct buffer_head *bh, int flag) { - __mark_dirty(bh, flag); + if (!atomic_set_buffer_dirty(bh)) + __mark_dirty(bh, flag); +} + +void mark_buffer_dirty(struct buffer_head *bh, int flag) +{ + __mark_buffer_dirty(bh, flag); + balance_dirty(bh->b_dev); } /* * A buffer may need to be moved from one buffer list to another * (e.g. in case it is not shared any more). Handle this. */ -static __inline__ void __refile_buffer(struct buffer_head *bh) +static void __refile_buffer(struct buffer_head *bh) { int dispose = BUF_CLEAN; if (buffer_locked(bh)) @@ -890,8 +978,6 @@ */ void __brelse(struct buffer_head * buf) { - touch_buffer(buf); - if (atomic_read(&buf->b_count)) { atomic_dec(&buf->b_count); return; @@ -912,12 +998,10 @@ write_lock(&hash_table_lock); if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf)) goto in_use; - if (buf->b_pprev) - __hash_unlink(buf); + __hash_unlink(buf); write_unlock(&hash_table_lock); __remove_from_lru_list(buf, buf->b_list); spin_unlock(&lru_list_lock); - buf->b_state = 0; put_last_free(buf); return; @@ -938,7 +1022,8 @@ if (buffer_uptodate(bh)) return bh; ll_rw_block(READ, 1, &bh); - wait_on_buffer(bh); + if (!buffer_uptodate(bh)) + wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; brelse(bh); @@ -1003,7 +1088,8 @@ /* Wait for this buffer, and then continue on. 
*/ bh = bhlist[0]; - wait_on_buffer(bh); + if (!buffer_uptodate(bh)) + wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; brelse(bh); @@ -1225,6 +1311,7 @@ clear_bit(BH_Uptodate, &bh->b_state); clear_bit(BH_Mapped, &bh->b_state); clear_bit(BH_Req, &bh->b_state); + clear_bit(BH_New, &bh->b_state); } } @@ -1303,7 +1390,6 @@ static void unmap_underlying_metadata(struct buffer_head * bh) { -#if 0 if (buffer_new(bh)) { struct buffer_head *old_bh; @@ -1316,7 +1402,6 @@ __bforget(old_bh); } } -#endif } /* @@ -1326,7 +1411,7 @@ int block_write_full_page(struct dentry *dentry, struct page *page) { struct inode *inode = dentry->d_inode; - int err, i; + int err, i, need_balance_dirty = 0; unsigned long block; struct buffer_head *bh, *head; @@ -1364,12 +1449,19 @@ unmap_underlying_metadata(bh); } set_bit(BH_Uptodate, &bh->b_state); - mark_buffer_dirty(bh,0); + if (!atomic_set_buffer_dirty(bh)) + { + __mark_dirty(bh, 0); + need_balance_dirty = 1; + } bh = bh->b_this_page; block++; } while (bh != head); + if (need_balance_dirty) + balance_dirty(bh->b_dev); + SetPageUptodate(page); return 0; out: @@ -1416,12 +1508,12 @@ if (err) goto out; unmap_underlying_metadata(bh); - } - if (buffer_new(bh)) { - zeroto = block_end; - if (block_start < zerofrom) - zerofrom = block_start; - continue; + if (buffer_new(bh)) { + zeroto = block_end; + if (block_start < zerofrom) + zerofrom = block_start; + continue; + } } if (!buffer_uptodate(bh) && (block_start < zerofrom || block_end > to)) { @@ -1433,7 +1525,8 @@ * If we issued read requests - let them complete. 
*/ while(wait_bh > wait) { - wait_on_buffer(*--wait_bh); + if (!buffer_uptodate(*--wait_bh)) + wait_on_buffer(*wait_bh); err = -EIO; if (!buffer_uptodate(*wait_bh)) goto out; @@ -1475,7 +1568,7 @@ partial = 1; } else { set_bit(BH_Uptodate, &bh->b_state); - if (!test_and_set_bit(BH_Dirty, &bh->b_state)) { + if (!atomic_set_buffer_dirty(bh)) { __mark_dirty(bh, 0); need_balance_dirty = 1; } @@ -2025,13 +2118,10 @@ /* The buffer can be either on the regular * queues or on the free list.. */ - if (p->b_dev == B_FREE) { + if (p->b_dev != B_FREE) + __remove_from_queues(p); + else __remove_from_free_list(p, index); - } else { - if (p->b_pprev) - __hash_unlink(p); - __remove_from_lru_list(p, p->b_list); - } __put_unused_buffer_head(p); } while (tmp != bh); spin_unlock(&unused_list_lock); @@ -2060,13 +2150,11 @@ void show_buffers(void) { -#ifdef __SMP__ struct buffer_head * bh; int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; int protected = 0; int nlist; static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY" }; -#endif printk("Buffer memory: %6dkB\n", atomic_read(&buffermem_pages) << (PAGE_SHIFT-10)); @@ -2074,11 +2162,15 @@ #ifdef __SMP__ /* trylock does nothing on UP and so we could deadlock */ if (!spin_trylock(&lru_list_lock)) return; +#endif for(nlist = 0; nlist < NR_LIST; nlist++) { found = locked = dirty = used = lastused = protected = 0; bh = lru_list[nlist]; if(!bh) continue; + if (bh->b_dev != MKDEV(22, 64)) + continue; + do { found++; if (buffer_locked(bh)) @@ -2097,7 +2189,6 @@ locked, protected, dirty); } spin_unlock(&lru_list_lock); -#endif } /* ===================== Init ======================= */ diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/fs/super.c linux/fs/super.c --- linux-2.3.43-6-clean/fs/super.c Tue Feb 1 15:45:00 2000 +++ linux/fs/super.c Fri Feb 4 18:00:37 2000 @@ -1378,7 +1378,10 @@ bdev = do_umount(old_root_dev,1, 0); if (!IS_ERR(bdev)) { printk("okay\n"); - invalidate_buffers(old_root_dev); + 
/* special: the old device driver is going to be + a ramdisk and the point of this call is to free its + protected memory (even if dirty). */ + destroy_buffers(old_root_dev); if (bdev) { blkdev_put(bdev, BDEV_FS); bdput(bdev); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/fs/udf/fsync.c linux/fs/udf/fsync.c --- linux-2.3.43-6-clean/fs/udf/fsync.c Thu Feb 10 04:38:08 2000 +++ linux/fs/udf/fsync.c Thu Feb 10 03:31:05 2000 @@ -28,6 +28,7 @@ #include #include #include +#include #include "udf_i.h" static int sync_extent_block (struct inode * inode, Uint32 block, int wait) diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h --- linux-2.3.43-6-clean/include/asm-i386/spinlock.h Tue Feb 1 15:44:51 2000 +++ linux/include/asm-i386/spinlock.h Thu Feb 10 01:35:41 2000 @@ -12,7 +12,7 @@ * initialize their spinlocks properly, tsk tsk. * Remember to turn this off in 2.4. -ben */ -#define SPINLOCK_DEBUG 1 +#define SPINLOCK_DEBUG 2 /* * Your basic SMP spinlocks, allowing only a single CPU anywhere diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/include/linux/blkdev.h linux/include/linux/blkdev.h --- linux-2.3.43-6-clean/include/linux/blkdev.h Tue Feb 8 22:59:20 2000 +++ linux/include/linux/blkdev.h Thu Feb 10 01:35:41 2000 @@ -48,6 +48,7 @@ typedef void (make_request_fn) (int rw, struct buffer_head *bh); typedef void (plug_device_fn) (request_queue_t *q, kdev_t device); typedef void (unplug_device_fn) (void *q); +typedef int (merge_bh_fn) (struct request *req, struct buffer_head *bh); struct request_queue { @@ -55,6 +56,7 @@ request_fn_proc * request_fn; merge_request_fn * merge_fn; merge_requests_fn * merge_requests_fn; + merge_bh_fn * merge_bh_fn; make_request_fn * make_request_fn; plug_device_fn * plug_device_fn; /* diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/include/linux/cdrom.h 
linux/include/linux/cdrom.h --- linux-2.3.43-6-clean/include/linux/cdrom.h Tue Feb 1 15:45:00 2000 +++ linux/include/linux/cdrom.h Wed Feb 2 22:47:13 2000 @@ -796,9 +796,9 @@ __u8 reserved1 : 3; __u8 erasable : 1; __u8 border_status : 2; - __u8 disc_border : 2; + __u8 disc_status : 2; #elif defined(__LITTLE_ENDIAN_BITFIELD) - __u8 disc_border : 2; + __u8 disc_status : 2; __u8 border_status : 2; __u8 erasable : 1; __u8 reserved1 : 3; @@ -981,7 +981,7 @@ #endif __u8 session_format; __u8 reserved6; - __u32 packet_size; + __u32 packet_size __attribute__((packed)); __u16 audio_pause; __u8 mcn[16]; __u8 isrc[16]; diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/include/linux/fs.h linux/include/linux/fs.h --- linux-2.3.43-6-clean/include/linux/fs.h Tue Feb 1 15:45:00 2000 +++ linux/include/linux/fs.h Thu Feb 10 01:35:41 2000 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,7 @@ #define WRITE 1 #define READA 2 /* read-ahead - don't block if no resources */ #define SPECIAL 4 /* For non-blockdevice requests in request queue */ +#define WRITE_PACKET 5 /* for packet writers */ #define WRITERAW 5 /* raw write - don't play with buffer lists */ @@ -230,6 +232,7 @@ unsigned long b_rsector; /* Real buffer location on disk */ wait_queue_head_t b_wait; struct kiobuf * b_kiobuf; /* kiobuf which owns this IO */ + struct timer_list timer; }; typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); @@ -852,20 +855,17 @@ } extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh, int flag)); +extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh, int flag)); #define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state) -extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag) -{ - if (!atomic_set_buffer_dirty(bh)) - __mark_buffer_dirty(bh, flag); -} - extern void balance_dirty(kdev_t); extern int check_disk_change(kdev_t); extern int invalidate_inodes(struct super_block 
*); extern void invalidate_inode_pages(struct inode *); -extern void invalidate_buffers(kdev_t); +#define invalidate_buffers(dev) __invalidate_buffers((dev), 0) +#define destroy_buffers(dev) __invalidate_buffers((dev), 1) +extern void __invalidate_buffers(kdev_t dev, int); extern int floppy_is_wp(int); extern void sync_inodes(kdev_t); extern void write_inode_now(struct inode *); diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/include/linux/locks.h linux/include/linux/locks.h --- linux-2.3.43-6-clean/include/linux/locks.h Sat Jan 8 00:31:15 2000 +++ linux/include/linux/locks.h Thu Feb 10 01:35:41 2000 @@ -8,17 +8,8 @@ #include #endif -/* - * Buffer cache locking - note that interrupts may only unlock, not - * lock buffers. - */ -extern void __wait_on_buffer(struct buffer_head *); - -extern inline void wait_on_buffer(struct buffer_head * bh) -{ - if (test_bit(BH_Lock, &bh->b_state)) - __wait_on_buffer(bh); -} +extern void wait_on_buffer(struct buffer_head * bh); +extern void __wait_on_buffer(struct buffer_head * bh); extern inline void lock_buffer(struct buffer_head * bh) { diff -urN --exclude-from /home/axboe/cdrom/exclude-from linux-2.3.43-6-clean/kernel/ksyms.c linux/kernel/ksyms.c --- linux-2.3.43-6-clean/kernel/ksyms.c Tue Feb 8 22:59:20 2000 +++ linux/kernel/ksyms.c Thu Feb 10 00:37:46 2000 @@ -151,6 +151,7 @@ EXPORT_SYMBOL(d_alloc); EXPORT_SYMBOL(d_lookup); EXPORT_SYMBOL(d_path); +EXPORT_SYMBOL(mark_buffer_dirty); EXPORT_SYMBOL(__mark_buffer_dirty); EXPORT_SYMBOL(__mark_inode_dirty); EXPORT_SYMBOL(free_kiovec); @@ -163,7 +164,7 @@ EXPORT_SYMBOL(put_filp); EXPORT_SYMBOL(files_lock); EXPORT_SYMBOL(check_disk_change); -EXPORT_SYMBOL(invalidate_buffers); +EXPORT_SYMBOL(__invalidate_buffers); EXPORT_SYMBOL(invalidate_inodes); EXPORT_SYMBOL(invalidate_inode_pages); EXPORT_SYMBOL(truncate_inode_pages); @@ -176,12 +177,14 @@ EXPORT_SYMBOL(get_hardblocksize); EXPORT_SYMBOL(set_blocksize); EXPORT_SYMBOL(getblk); +EXPORT_SYMBOL(show_buffers); 
EXPORT_SYMBOL(bread); EXPORT_SYMBOL(breada); EXPORT_SYMBOL(__brelse); EXPORT_SYMBOL(__bforget); EXPORT_SYMBOL(ll_rw_block); EXPORT_SYMBOL(__wait_on_buffer); +EXPORT_SYMBOL(wait_on_buffer); EXPORT_SYMBOL(___wait_on_page); EXPORT_SYMBOL(block_read_full_page); EXPORT_SYMBOL(block_write_full_page); @@ -223,6 +226,12 @@ EXPORT_SYMBOL(page_readlink); EXPORT_SYMBOL(page_follow_link); EXPORT_SYMBOL(block_symlink); + +extern void end_buffer_io_async(struct buffer_head *bh, int uptodate); +extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate); + +EXPORT_SYMBOL(end_buffer_io_async); +EXPORT_SYMBOL(end_buffer_io_sync); /* for stackable file systems (lofs, wrapfs, etc.) */ EXPORT_SYMBOL(add_to_page_cache);