diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/Documentation/Configure.help linux/Documentation/Configure.help --- /opt/kernel/linux-2.4.0-test10/Documentation/Configure.help Tue Oct 31 06:12:34 2000 +++ linux/Documentation/Configure.help Tue Oct 31 06:14:11 2000 @@ -511,6 +511,31 @@ say M here and read Documentation/modules.txt. The module will be called ide-cd.o. + +Packet writing on CD/DVD media (EXPERIMENTAL) +CONFIG_CDROM_PKTCDVD + If you have a CDROM drive that supports packet writing, say Y to + include preliminary support. It should work with any MMC/Mt Fuji + compliant ATAPI or SCSI drive, which is just about any newer CD + writer. + + Currently only writing to CD-RW discs is possible. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read Documentation/modules.txt. The module will be + called packet.o. + +Free buffers +CONFIG_CDROM_PKTCDVD_BUFFERS + This controls the number of free buffers that are allocated for + data gathering. More buffers speed up big writes at the cost of + latency and a bigger memory requirement (2KB per buffer). + + This option has no effect at all if the CD-RW is used with other + file systems (or without a file system). + + Include IDE/ATAPI TAPE support CONFIG_BLK_DEV_IDETAPE If you have an IDE tape drive using the ATAPI protocol, say Y. diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c --- /opt/kernel/linux-2.4.0-test10/arch/sparc64/kernel/ioctl32.c Mon Oct 30 08:41:32 2000 +++ linux/arch/sparc64/kernel/ioctl32.c Sat Oct 28 22:34:09 2000 @@ -86,6 +86,7 @@ #include #include #include +#include /* Use this to get at 32-bit user passed pointers. See sys_sparc32.c for description about these.
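To put the CONFIG_CDROM_PKTCDVD_BUFFERS help text above into numbers, here is a minimal sizing sketch (pkt_pool_bytes() is illustrative only, not a function in this patch): each buffer in the driver's private pool carries one 2048-byte CD frame, so the default of 256 buffers from drivers/block/Config.in pins roughly 512KB of kernel memory per writer.

    #define CD_FRAMESIZE 2048	/* one CD frame -- the "2KB per buffer" from the help text */

    static inline unsigned long pkt_pool_bytes(unsigned int nbuffers)
    {
    	/* default CONFIG_CDROM_PKTCDVD_BUFFERS=256 -> 256 * 2048 = 524288 bytes */
    	return (unsigned long) nbuffers * CD_FRAMESIZE;
    }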
*/ @@ -714,6 +715,37 @@ return ret; } +struct packet_stats32 { + u32 bh_s; + u32 bh_e; + u32 bh_w; + u32 bh_r; +}; + +static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg) +{ + struct packet_stats p; + struct packet_stats32 p32; + mm_segment_t old_fs = get_fs(); + int ret; + + ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32)); + if (ret) + return -EFAULT; +#define P(x) (p.x = (unsigned long)p32.x) + P(bh_s); + P(bh_e); + P(bh_w); + P(bh_r); +#undef P + + set_fs (KERNEL_DS); + ret = sys_ioctl (fd, cmd, (long)&p); + set_fs (old_fs); + + return ret; +} + struct hd_geometry32 { unsigned char heads; unsigned char sectors; @@ -3581,6 +3613,12 @@ /* elevator */ COMPATIBLE_IOCTL(BLKELVGET) COMPATIBLE_IOCTL(BLKELVSET) +/* Big X, CDRW Packet Driver */ +#if defined(CONFIG_CDROM_PKTCDVD) +COMPATIBLE_IOCTL(PACKET_SETUP_DEV) +COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV) +HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats) +#endif /* CONFIG_CDROM_PKTCDVD */ /* And these ioctls need translation */ HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32) HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf) diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/Config.in linux/drivers/block/Config.in --- /opt/kernel/linux-2.4.0-test10/drivers/block/Config.in Fri Sep 22 17:11:37 2000 +++ linux/drivers/block/Config.in Mon Oct 23 01:31:43 2000 @@ -37,6 +37,11 @@ dep_tristate 'Compaq CISS Array support' CONFIG_BLK_CPQ_CISS_DA $CONFIG_PCI dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960 $CONFIG_PCI +tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD +if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then + int ' Free buffers for data gathering' CONFIG_CDROM_PKTCDVD_BUFFERS 256 +fi + tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/DAC960.c linux/drivers/block/DAC960.c --- /opt/kernel/linux-2.4.0-test10/drivers/block/DAC960.c Mon Oct 30 08:41:32 2000 +++ linux/drivers/block/DAC960.c Mon Oct 23 01:31:43 2000 @@ -1825,7 +1825,6 @@ Request->nr_segments < Controller->DriverScatterGatherLimit) { Request->nr_segments++; - RequestQueue->elevator.nr_segments++; return true; } return false; @@ -1849,7 +1848,6 @@ Request->nr_segments < Controller->DriverScatterGatherLimit) { Request->nr_segments++; - RequestQueue->elevator.nr_segments++; return true; } return false; @@ -1879,7 +1877,6 @@ if (TotalSegments > MaxSegments || TotalSegments > Controller->DriverScatterGatherLimit) return false; - RequestQueue->elevator.nr_segments -= SameSegment; Request->nr_segments = TotalSegments; return true; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/Makefile linux/drivers/block/Makefile --- /opt/kernel/linux-2.4.0-test10/drivers/block/Makefile Fri Sep 22 17:11:37 2000 +++ linux/drivers/block/Makefile Mon Oct 23 01:31:43 2000 @@ -14,7 +14,7 @@ O_TARGET := block.o -export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o +export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o elevator.o obj-y := ll_rw_blk.o blkpg.o genhd.o elevator.o @@ -35,6 +35,8 @@ obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o obj-$(CONFIG_BLK_DEV_NBD) += nbd.o + +obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o ifeq ($(CONFIG_PARIDE),y) SUB_DIRS += paride diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/elevator.c linux/drivers/block/elevator.c --- 
/opt/kernel/linux-2.4.0-test10/drivers/block/elevator.c Tue Aug 22 22:33:46 2000 +++ linux/drivers/block/elevator.c Tue Oct 24 13:12:15 2000 @@ -24,6 +24,7 @@ #include #include #include +#include #include /* @@ -51,50 +52,67 @@ } int elevator_linus_merge(request_queue_t *q, struct request **req, + struct list_head * head, struct buffer_head *bh, int rw, - int *max_sectors, int *max_segments) + int max_sectors, int max_segments) { - struct list_head *entry, *head = &q->queue_head; + struct list_head *entry; unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE; + int front = 0, back = 0; - entry = head; - if (q->head_active && !q->plugged) - head = head->next; - - while ((entry = entry->prev) != head) { + entry = &q->queue_head; + while ((entry = entry->prev) != head && !back && !front) { struct request *__rq = *req = blkdev_entry_to_request(entry); - if (__rq->sem) - continue; + if (__rq->cmd != rw) continue; - if (__rq->nr_sectors + count > *max_sectors) - continue; if (__rq->rq_dev != bh->b_rdev) continue; - if (__rq->sector + __rq->nr_sectors == bh->b_rsector) { + if (__rq->sector + __rq->nr_sectors == bh->b_rsector) + back = 1; + else if (__rq->sector - count == bh->b_rsector) + front = 1; + + if (__rq->nr_sectors + count > max_sectors) + continue; + if (__rq->sem) + continue; + + if (back) { ret = ELEVATOR_BACK_MERGE; break; } if (!__rq->elevator_sequence) break; - if (__rq->sector - count == bh->b_rsector) { - __rq->elevator_sequence--; + if (front) { ret = ELEVATOR_FRONT_MERGE; break; } } + return ret; +} + +void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int ret, int front) +{ + struct list_head *entry = &req->queue, *head = &q->queue_head; + + if (front) + req->elevator_sequence--; + /* * second pass scan of requests that got passed over, if any */ - if (ret != ELEVATOR_NO_MERGE && *req) { - while ((entry = entry->next) != &q->queue_head) { - struct request *tmp = blkdev_entry_to_request(entry); - tmp->elevator_sequence--; - } + while ((entry = entry->next) != head) { + struct request *tmp = blkdev_entry_to_request(entry); + tmp->elevator_sequence--; } +} - return ret; +void elevator_linus_merge_req(struct request *req, struct request *next) +{ + if (next->elevator_sequence < req->elevator_sequence) + req->elevator_sequence = next->elevator_sequence; } /* @@ -108,41 +126,46 @@ } /* - * See if we can find a request that is buffer can be coalesced with. + * See if we can find a request that this buffer can be coalesced with. 
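Both merge scans above come down to the same two sector-adjacency tests, restated here as a sketch (the helper names are illustrative, not part of the patch; count is bh->b_size >> 9 as in the functions themselves):

    /* back merge: the queued request ends exactly where the new buffer begins */
    static inline int back_adjacent(struct request *rq, struct buffer_head *bh)
    {
    	return rq->sector + rq->nr_sectors == bh->b_rsector;
    }

    /* front merge: the new buffer ends exactly where the queued request begins */
    static inline int front_adjacent(struct request *rq, struct buffer_head *bh)
    {
    	return rq->sector - (bh->b_size >> 9) == bh->b_rsector;
    }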
*/ int elevator_noop_merge(request_queue_t *q, struct request **req, + struct list_head * head, struct buffer_head *bh, int rw, - int *max_sectors, int *max_segments) + int max_sectors, int max_segments) { - struct list_head *entry, *head = &q->queue_head; + struct list_head *entry; unsigned int count = bh->b_size >> 9; + int back = 0, front = 0; - if (q->head_active && !q->plugged) - head = head->next; - - entry = head; - while ((entry = entry->prev) != head) { + entry = &q->queue_head; + while ((entry = entry->prev) != head && !back && !front) { struct request *__rq = *req = blkdev_entry_to_request(entry); - if (__rq->sem) - continue; + if (__rq->cmd != rw) continue; - if (__rq->nr_sectors + count > *max_sectors) - continue; if (__rq->rq_dev != bh->b_rdev) continue; if (__rq->sector + __rq->nr_sectors == bh->b_rsector) + back = 1; + else if (__rq->sector - count == bh->b_rsector) + front = 1; + + if (__rq->nr_sectors + count > max_sectors) + continue; + if (__rq->sem) + continue; + + if (back) return ELEVATOR_BACK_MERGE; - if (__rq->sector - count == bh->b_rsector) + if (front) return ELEVATOR_FRONT_MERGE; } return ELEVATOR_NO_MERGE; } -/* - * The noop "elevator" does not do any accounting - */ -void elevator_noop_dequeue(struct request *req) {} +void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int ret, int front) {} + +void elevator_noop_merge_req(struct request *req, struct request *next) {} int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg) { @@ -183,3 +206,5 @@ *elevator = type; elevator->queue_ID = queue_ID++; } + +EXPORT_SYMBOL(elevator_init); diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c --- /opt/kernel/linux-2.4.0-test10/drivers/block/ll_rw_blk.c Mon Oct 30 08:41:32 2000 +++ linux/drivers/block/ll_rw_blk.c Tue Oct 31 07:46:56 2000 @@ -125,7 +125,7 @@ return max_sectors[MAJOR(dev)][MINOR(dev)]; } -static inline request_queue_t *__blk_get_queue(kdev_t dev) +inline request_queue_t *__blk_get_queue(kdev_t dev) { struct blk_dev_struct *bdev = blk_dev + MAJOR(dev); @@ -153,17 +153,14 @@ static int __blk_cleanup_queue(struct list_head *head) { - struct list_head *entry; struct request *rq; int i = 0; if (list_empty(head)) return 0; - entry = head->next; do { - rq = list_entry(entry, struct request, table); - entry = entry->next; + rq = list_entry(head->next, struct request, table); list_del(&rq->table); kmem_cache_free(request_cachep, rq); i++; @@ -280,7 +277,6 @@ { if (req->nr_segments < max_segments) { req->nr_segments++; - q->elevator.nr_segments++; return 1; } return 0; @@ -317,7 +313,6 @@ if (total_segments > max_segments) return 0; - q->elevator.nr_segments -= same_segment; req->nr_segments = total_segments; return 1; } @@ -354,7 +349,7 @@ } } -static void generic_unplug_device(void *data) +void generic_unplug_device(void *data) { request_queue_t *q = (request_queue_t *) data; unsigned long flags; @@ -369,13 +364,15 @@ struct request *rq; int i; + INIT_LIST_HEAD(&q->request_freelist[READ]); + INIT_LIST_HEAD(&q->request_freelist[WRITE]); + /* - * Divide requests in half between read and write. This used to - * be a 2/3 advantage for reads, but now reads can steal from - * the write free list. 
+ * Divide requests in half between read and write */ for (i = 0; i < QUEUE_NR_REQUESTS; i++) { rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL); + memset(rq, 0, sizeof(struct request)); rq->rq_status = RQ_INACTIVE; list_add(&rq->table, &q->request_freelist[i & 1]); } @@ -414,15 +411,13 @@ * blk_queue_headactive(). * * Note: - * blk_init_queue() must be paired with a blk_cleanup-queue() call + * blk_init_queue() must be paired with a blk_cleanup_queue() call * when the block device is deactivated (such as at module unload). **/ static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh); void blk_init_queue(request_queue_t * q, request_fn_proc * rfn) { INIT_LIST_HEAD(&q->queue_head); - INIT_LIST_HEAD(&q->request_freelist[READ]); - INIT_LIST_HEAD(&q->request_freelist[WRITE]); elevator_init(&q->elevator, ELEVATOR_LINUS); blk_init_free_list(q); q->request_fn = rfn; @@ -444,7 +439,6 @@ q->head_active = 1; } - #define blkdev_free_rq(list) list_entry((list)->next, struct request, table); /* * Get a free request. io_request_lock must be held and interrupts @@ -452,34 +446,22 @@ */ static inline struct request *get_request(request_queue_t *q, int rw) { - struct list_head *list = &q->request_freelist[rw]; struct request *rq; + int list_rw = rw; - /* - * Reads get preferential treatment and are allowed to steal - * from the write free list if necessary. - */ - if (!list_empty(list)) { - rq = blkdev_free_rq(list); + if (!list_empty(&q->request_freelist[rw])) goto got_rq; - } - /* - * if the WRITE list is non-empty, we know that rw is READ - * and that the READ list is empty. allow reads to 'steal' - * from the WRITE list. - */ if (!list_empty(&q->request_freelist[WRITE])) { - list = &q->request_freelist[WRITE]; - rq = blkdev_free_rq(list); + list_rw = WRITE; goto got_rq; } return NULL; - got_rq: + rq = blkdev_free_rq(&q->request_freelist[list_rw]); list_del(&rq->table); - rq->free_list = list; + rq->free_list = list_rw; rq->rq_status = RQ_ACTIVE; rq->special = NULL; rq->q = q; @@ -611,16 +593,19 @@ */ void inline blkdev_release_request(struct request *req) { + request_queue_t *q = req->q; + req->rq_status = RQ_INACTIVE; + req->q = NULL; /* * Request may not have originated from ll_rw_blk */ - if (req->free_list) { - list_add(&req->table, req->free_list); - req->free_list = NULL; - wake_up(&req->q->wait_for_request); - } + if (q == NULL) + return; + + list_add(&req->table, &q->request_freelist[req->free_list]); + wake_up(&q->wait_for_request); } /* @@ -647,10 +632,12 @@ if(!(q->merge_requests_fn)(q, req, next, max_segments)) return; + q->elevator.elevator_merge_req_fn(req, next); req->bhtail->b_reqnext = next->bh; req->bhtail = next->bhtail; req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; list_del(&next->queue); + req->end_io = next->end_io; blkdev_release_request(next); } @@ -683,14 +670,15 @@ { unsigned int sector, count; int max_segments = MAX_SEGMENTS; - struct request * req = NULL; + struct request * req = NULL, *freereq = NULL; int rw_ahead, max_sectors, el_ret; - struct list_head *head = &q->queue_head; + struct list_head *head; int latency; elevator_t *elevator = &q->elevator; count = bh->b_size >> 9; sector = bh->b_rsector; + bh->b_queue = q; rw_ahead = 0; /* normal case; gets changed below for READA */ switch (rw) { @@ -698,6 +686,7 @@ rw_ahead = 1; rw = READ; /* drop into READ */ case READ: + break; case WRITE: break; default: @@ -729,33 +718,30 @@ latency = elevator_request_latency(elevator, rw); +again: /* * Now we acquire the request spinlock, we 
have to be mega careful * not to schedule or do something nonatomic */ spin_lock_irq(&io_request_lock); - /* - * skip first entry, for devices with active queue head - */ - if (q->head_active && !q->plugged) - head = head->next; - + head = &q->queue_head; if (list_empty(head)) { q->plug_device_fn(q, bh->b_rdev); /* is atomic */ goto get_rq; - } + } else if (q->head_active && !q->plugged) + head = head->next; - el_ret = elevator->elevator_merge_fn(q, &req, bh, rw, &max_sectors, &max_segments); + el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw, max_sectors, max_segments); switch (el_ret) { case ELEVATOR_BACK_MERGE: if (!q->back_merge_fn(q, req, bh, max_segments)) break; + elevator->elevator_merge_cleanup_fn(q, req, el_ret, 0); req->bhtail->b_reqnext = bh; req->bhtail = bh; req->nr_sectors = req->hard_nr_sectors += count; - req->e = elevator; drive_stat_acct(req->rq_dev, req->cmd, count, 0); attempt_back_merge(q, req, max_sectors, max_segments); goto out; @@ -763,16 +749,20 @@ case ELEVATOR_FRONT_MERGE: if (!q->front_merge_fn(q, req, bh, max_segments)) break; + elevator->elevator_merge_cleanup_fn(q, req, el_ret, 1); bh->b_reqnext = req->bh; req->bh = bh; req->buffer = bh->b_data; req->current_nr_sectors = count; req->sector = req->hard_sector = sector; req->nr_sectors = req->hard_nr_sectors += count; - req->e = elevator; drive_stat_acct(req->rq_dev, req->cmd, count, 0); attempt_front_merge(q, head, req, max_sectors, max_segments); goto out; + + case ELEVATOR_HOLE_MERGE: + goto out; + /* * elevator says don't/can't merge. get new request */ @@ -791,19 +781,16 @@ * are not crucial. */ get_rq: - if ((req = get_request(q, rw)) == NULL) { + if (freereq) { + req = freereq; + freereq = NULL; + } else if ((req = get_request(q, rw)) == NULL) { spin_unlock_irq(&io_request_lock); if (rw_ahead) goto end_io; - req = __get_request_wait(q, rw); - spin_lock_irq(&io_request_lock); - - if (q->head_active) { - head = &q->queue_head; - if (!q->plugged) - head = head->next; - } + freereq = __get_request_wait(q, rw); + goto again; } /* fill up the request-info, and add it to the queue */ @@ -819,11 +806,10 @@ req->bh = bh; req->bhtail = bh; req->rq_dev = bh->b_rdev; - req->e = elevator; add_request(q, req, head, latency); out: - if (!q->plugged) - (q->request_fn)(q); + if (freereq) + blkdev_release_request(freereq); spin_unlock_irq(&io_request_lock); return 0; end_io: @@ -878,7 +864,6 @@ buffer_IO_error(bh); break; } - } while (q->make_request_fn(q, rw, bh)); } @@ -996,8 +981,11 @@ if ((bh = req->bh) != NULL) { nsect = bh->b_size >> 9; req->bh = bh->b_reqnext; + if (req->bh && (bh->b_rsector + (bh->b_size >> 9)) != req->bh->b_rsector) + printk("%s: %lu is followed by %lu\n", name, bh->b_rsector, req->bh->b_rsector); bh->b_reqnext = NULL; bh->b_end_io(bh, uptodate); + bh->b_queue = NULL; if ((bh = req->bh) != NULL) { req->hard_sector += nsect; req->hard_nr_sectors -= nsect; @@ -1018,12 +1006,10 @@ void end_that_request_last(struct request *req) { - if (req->e) { - printk("end_that_request_last called with non-dequeued req\n"); - BUG(); - } if (req->sem != NULL) up(req->sem); + if (req->end_io) + req->end_io(req); blkdev_release_request(req); } @@ -1159,9 +1145,11 @@ EXPORT_SYMBOL(end_that_request_last); EXPORT_SYMBOL(blk_init_queue); EXPORT_SYMBOL(blk_get_queue); +EXPORT_SYMBOL(__blk_get_queue); EXPORT_SYMBOL(blk_cleanup_queue); EXPORT_SYMBOL(blk_queue_headactive); EXPORT_SYMBOL(blk_queue_pluggable); EXPORT_SYMBOL(blk_queue_make_request); EXPORT_SYMBOL(generic_make_request); 
EXPORT_SYMBOL(blkdev_release_request); +EXPORT_SYMBOL(generic_unplug_device); diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/paride/pd.c linux/drivers/block/paride/pd.c --- /opt/kernel/linux-2.4.0-test10/drivers/block/paride/pd.c Tue Jun 20 07:24:52 2000 +++ linux/drivers/block/paride/pd.c Mon Oct 23 01:31:43 2000 @@ -392,7 +392,6 @@ if (req->nr_segments < max_segments) { req->nr_segments++; - q->elevator.nr_segments++; return 1; } return 0; @@ -432,7 +431,6 @@ if (total_segments > max_segments) return 0; - q->elevator.nr_segments -= same_segment; req->nr_segments = total_segments; return 1; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/paride/pf.c linux/drivers/block/paride/pf.c --- /opt/kernel/linux-2.4.0-test10/drivers/block/paride/pf.c Tue Apr 4 19:25:14 2000 +++ linux/drivers/block/paride/pf.c Mon Oct 23 01:31:43 2000 @@ -346,7 +346,6 @@ if (req->nr_segments < max_segments) { req->nr_segments++; - q->elevator.nr_segments++; return 1; } return 0; @@ -386,7 +385,6 @@ if (total_segments > max_segments) return 0; - q->elevator.nr_segments -= same_segment; req->nr_segments = total_segments; return 1; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/block/pktcdvd.c linux/drivers/block/pktcdvd.c --- /opt/kernel/linux-2.4.0-test10/drivers/block/pktcdvd.c Wed Dec 31 16:00:00 1969 +++ linux/drivers/block/pktcdvd.c Tue Oct 31 08:43:54 2000 @@ -0,0 +1,2106 @@ +/* + * Copyright (C) 2000 Jens Axboe + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices (aka an exercise in block layer masturbation) + * + * + * TODO: (circa order of when I will fix it) + * - Only able to write on CD-RW media right now. + * - check host application code on media and set it in write page + * - Generic interface for UDF to submit large packets for variable length + * packet writing (kiovec of dirty pages) + * - (in correlation with above) interface for UDF <-> packet to negotiate + * a new location when a write fails. + * - handle OPC, especially for -RW media + * + * ------------------------------------------------------------------------ + * + * 0.0.2d (26/10/2000) + * - (scsi) use implicit segment recounting for all hba's + * - fix speed setting, was consistently off on most drives + * - only print capacity when opening for write + * - fix off-by-two error in getting/setting write+read speed (affected + * reporting as well as actual speed used) + * - possible to enable write caching on drive + * - do ioctl marshalling on sparc64 from Ben Collins + * - avoid unaligned access on flags, should have been unsigned long of course + * - fixed missed wakeup in kpacketd + * - b_dev error (two places) + * - fix buffer head b_count bugs + * - fix hole merge bug, where tail could be added twice + * - fsync and invalidate buffers on close + * - check hash table for buffers first before using our own + * - add read-ahead + * - fixed several list races + * - fix proc reporting for more than one device + * - change to O_CREAT for creating devices + * - added media_change hook + * - added free buffers config option + * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock + * is done explicitly in pkt_remove dev anyway.
+ * - added proper elevator insertion (should probably be part of elevator.c) + * - moved kernel thread info to private device, spawn one for each writer + * - added separate buffer list for dirty packet buffers + * - fixed nasty data corruption bug + * - remember to account request even when we don't gather data for it + * - add ioctl to force wakeup of kernel thread (for debug) + * - fixed packet size setting bug on zero detected + * - changed a lot of the proc reporting to be more readable to "humans" + * - set full speed for read-only opens + * + * 0.0.2c (08/09/2000) + * - inc usage count of buffer heads + * - add internal buffer pool to avoid deadlock on oom + * - gather data for as many buffers as we have, before initiating write. this + * allows the laser to stay on longer, giving better performance. + * - fix always busy when tray can't be locked + * - remove request duplication nastiness, inject directly into the target + * - adapted to devfs and elevator changes + * - added proc interface + * + * 0.0.2b (21/06/2000) + * - fix io_request_lock typos (missing '&') + * - grab pkt_sem before invoking pkt_handle_queue + * - SCSI uses queuedata too, mirror that in pd->queuedata (hack) + * - remove SCSI sr debug messages + * - really activate empty block querying (requires cvs UDF, CDRW branch) + * - make sure sync_buffers doesn't consider us, or we can deadlock + * - make sure people don't swap on us (for now ;) + * + * 0.0.2a (19/06/2000) + * - add kpacketd kernel thread to handle actual data gathering + * - pd->pkt_dev is now real device, not just minor + * - add support for super_operations block_empty fn, to query fs for + * unused blocks that don't need reading + * - "cache" blocks that are contained in the UDF file/dir packet + * - rewrite pkt_gather_data to a one-step solution + * - add private pktcdvd elevator + * - shutdown write access to device upon write failure + * - fix off-by-one bug in capacity + * - setup sourceforge project (packet-cd.sourceforge.net) + * - add more blk ioctls to pkt_ioctl + * - set inactive request queue head + * - change panic calls to BUG, better with kdb + * - have pkt_gather_data check correct block size and kill rq if wrong + * - rework locking + * - introduce per-pd queues, simplifies pkt_request + * - store pd in queuedata + * + *************************************************************************/ + +#define VERSION_CODE "v0.0.2d 31/10/2000 Jens Axboe (axboe@suse.de)" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * 32 buffers of 2048 bytes + */ +#define PACKET_MAX_SIZE 32 + +#define NEXT_BH(bh, nbh) (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector) + +#define BH_IN_ORDER(b1, b2) ((b1)->b_rsector < (b2)->b_rsector) + +#define CONTIG_BH(b1, b2) ((b1)->b_data + (b1)->b_size == (b2)->b_data) + +#define ZONE(sector, pd) \ + (((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) % ((pd)->settings.size))) + +static int *pkt_sizes; +static int *pkt_blksize; +static int *pkt_readahead; +static struct pktcdvd_device *pkt_devs; + +/* + * a bit of a kludge, but we want to be able to pass both real and packet + * dev and get the right one. 
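The ZONE() macro defined above is what confines all merging to a single packet. A worked example with illustrative numbers, assuming settings.size = 128 sectors (a 64KB fixed packet) and offset = 0:

    ZONE(200, pd) = (200 + 0) - ((200 + 0) % 128)
                  = 200 - 72
                  = 128	/* sector 200 belongs to the packet starting at sector 128 */

Two buffers or requests are only allowed to merge when their sectors round down to the same zone start, which is exactly what the merge functions below check.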
+ */ +static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev) +{ + int i; + + for (i = 0; i < MAX_WRITERS; i++) + if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev) + return &pkt_devs[i]; + + return NULL; +} + +static void pkt_recheck_segments(struct request *rq) +{ + struct buffer_head *bh; + int nr_segments = 1, sectors; + + bh = rq->bh; + sectors = bh->b_size >> 9; + + while (bh->b_reqnext) { + if (!CONTIG_BH(bh, bh->b_reqnext)) + nr_segments++; + bh = bh->b_reqnext; + sectors += bh->b_size >> 9; + } + + rq->nr_segments = nr_segments; + + if (sectors != rq->nr_sectors) { + printk("tell jens, %u != %lu\n", sectors, rq->nr_sectors); + BUG(); + } +} + +/* + * The following three functions are the plugins to the ll_rw_blk + * layer and decide whether a given request / buffer head can be + * merged. We differ in a couple of ways from "normal" block + * devices: + * + * - don't merge when the buffer / request crosses a packet block + * boundary + * - merge buffer head even though it can't be added directly to the + * front or back of the list. this gives us better performance, since + * what would otherwise require multiple requests can now be handled + * in one (hole merging) + * - we only care about write merging, reads use device original defaults. + * + * The device's original merge_ functions are stored in the packet device + * queue (pd->q) + * + */ +static inline int pkt_do_merge(request_queue_t *q, struct request *rq, + struct buffer_head *bh, int max_segs, + merge_request_fn *merge_fn, + struct pktcdvd_device *pd) +{ + void *ptr = q->queuedata; + int ret; + + if (rq->cmd == WRITE && ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd)) + return ELEVATOR_NO_MERGE; + + q->queuedata = pd->cdrw.queuedata; + ret = merge_fn(q, rq, bh, max_segs); + q->queuedata = ptr; + return ret; +} + +static int pkt_front_merge_fn(request_queue_t *q, struct request *rq, + struct buffer_head *bh, int max_segs) +{ + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev); + + return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd); +} + +static int pkt_back_merge_fn(request_queue_t *q, struct request *rq, + struct buffer_head *bh, int max_segs) +{ + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev); + + return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd); +} + +/* + * rules similar to above + */ +static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq, + struct request *next, int max_segs) +{ + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev); + struct packet_cdrw *cdrw = &pd->cdrw; + void *ptr = q->queuedata; + int ret; + + if (ZONE(rq->sector, pd) != ZONE(next->sector + next->nr_sectors, pd)) + return 0; + + q->queuedata = cdrw->queuedata; + ret = cdrw->merge_requests_fn(q, rq, next, max_segs); + q->queuedata = ptr; + return ret; +} + +/* + * The logic here is to try the queue default merge first and if it says + * ok, fine. If not, we find the appropriate place to insert this buffer + * head in the list, so that they are ordered before we receive the + * request. + */ +static int pkt_hole_merge_fn(request_queue_t *q, struct request *rq, + struct buffer_head *bh, int max_segs) +{ + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev); + struct buffer_head *tmp; + + /* + * holes only supported for writing + */ + if (rq->cmd == READ) + return ELEVATOR_NO_MERGE; + + /* + * there may be a possibility for the back buffer to be + * added twice. if this triggers, let me know.
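To make the hole merge below concrete, a sketch with illustrative numbers (2KB buffers, i.e. 4 sectors each): a write request already holds sectors 0-3 and 12-15 of a zone when a buffer covering 4-7 arrives. It is neither front- nor back-adjacent, so it is spliced into the ordered bh chain and the remaining gap is left for pkt_gather_data to fill at write time:

    before:  rq->bh: [0..3] -> [12..15]            (rq->nr_sectors = 8)
    merge:   bh covers [4..7], same zone, no direct front/back fit
    after:   rq->bh: [0..3] -> [4..7] -> [12..15]  (rq->nr_sectors = 12, one more segment)
             sectors 8..11 stay a hole until pkt_gather_data reads (or zero-fills) them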
+ */ + if (bh->b_rsector == rq->bhtail->b_rsector) { + printk("pktcdvd: tell jens, tail dupe (%lu)\n", bh->b_rsector); + BUG(); + } + + if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd)) + return ELEVATOR_NO_MERGE; + + if (rq->nr_segments >= max_segs) + return ELEVATOR_NO_MERGE; + + /* + * stuff in front? + */ + if (bh->b_rsector < rq->sector) { + bh->b_reqnext = rq->bh; + rq->bh = bh; + rq->sector = rq->hard_sector = bh->b_rsector; + rq->current_nr_sectors = bh->b_size >> 9; + goto out; + } + + /* + * stuff in back? + */ + if (bh->b_rsector > rq->bhtail->b_rsector) { + rq->bhtail->b_reqnext = bh; + rq->bhtail = bh; + goto out; + } + + /* + * find sweet spot to insert buffer + */ + for (tmp = rq->bh; tmp->b_reqnext; tmp = tmp->b_reqnext) { + /* + * debug + */ + if (tmp->b_rsector == bh->b_rsector) { + spin_unlock_irq(&io_request_lock); + BUG(); + } + if (BH_IN_ORDER(tmp, bh) && BH_IN_ORDER(bh, tmp->b_reqnext)) + break; + } + + if (tmp->b_reqnext && tmp->b_reqnext->b_rsector == bh->b_rsector) { + spin_unlock_irq(&io_request_lock); + BUG(); + } + + bh->b_reqnext = tmp->b_reqnext; + tmp->b_reqnext = bh; +out: + rq->buffer = rq->bh->b_data; + rq->nr_sectors += (bh->b_size >> 9); + rq->hard_nr_sectors = rq->nr_sectors; + rq->nr_segments++; + return ELEVATOR_HOLE_MERGE; +} + +static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count) +{ + struct packet_cdrw *cdrw = &pd->cdrw; + struct buffer_head *bh; + int i = 0; + + while (i < count) { + bh = kmalloc(sizeof(struct buffer_head), GFP_KERNEL); + if (bh == NULL) + break; + + bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL); + if (bh->b_data == NULL) { + kfree(bh); + break; + } + spin_lock_irq(&pd->lock); + bh->b_pprev = &cdrw->bhlist; + bh->b_next = cdrw->bhlist; + cdrw->bhlist = bh; + spin_unlock_irq(&pd->lock); + + bh->b_size = CD_FRAMESIZE; + bh->b_list = PKT_BUF_LIST; + atomic_inc(&cdrw->free_bh); + i++; + } + return i; +} + +static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count) +{ + struct packet_cdrw *cdrw = &pd->cdrw; + struct buffer_head *bh; + int i = 0; + + while ((i < count) && cdrw->bhlist) { + spin_lock_irq(&pd->lock); + bh = cdrw->bhlist; + cdrw->bhlist = bh->b_next; + spin_unlock_irq(&pd->lock); + if (bh->b_list != PKT_BUF_LIST) + BUG(); + kfree(bh->b_data); + kfree(bh); + atomic_dec(&cdrw->free_bh); + i++; + } + return i; +} + +static request_queue_t *pkt_return_queue(kdev_t dev) +{ + struct pktcdvd_device *pd = pkt_find_dev(dev); + + return &pd->cdrw.r_queue; +} + +static void pkt_end_io_read(struct buffer_head *bh, int uptodate) +{ + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); +} + +/* + * if the buffer is already in the buffer cache, grab it if we can lock + * it down + */ +static inline struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, + int size) +{ +#if 0 + struct buffer_head *bh; + + if ((bh = get_hash_table(dev, block, size))) { + if (!test_and_set_bit(BH_Lock, &bh->b_state)) { + atomic_set_buffer_clean(bh); + return bh; + } + printk("buffer %lu was already locked\n", bh->b_rsector); + brelse(bh); + } +#endif + + return NULL; +} + +static void pkt_end_io_write(struct buffer_head *, int); + +static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd, + unsigned long sector, int size) +{ + struct buffer_head *bh; + + if ((bh = pkt_get_hash(pd->pkt_dev, sector / (size >> 9), size))) { + bh->b_private = pd; + bh->b_end_io = pkt_end_io_write; + goto out; + } + + /* + * should not happen... 
+ */ + if (!atomic_read(&pd->cdrw.free_bh)) { + printk("pktcdvd: no buffers available!\n"); + BUG(); + } + + atomic_dec(&pd->cdrw.free_bh); + atomic_inc(&pd->cdrw.pending_bh); + + spin_lock_irq(&pd->lock); + bh = pd->cdrw.bhlist; + pd->cdrw.bhlist = bh->b_next; + bh->b_next = NULL; + spin_unlock_irq(&pd->lock); + + init_buffer(bh, pkt_end_io_read, pd); + + bh->b_next_free = NULL; + bh->b_prev_free = NULL; + bh->b_this_page = NULL; + bh->b_pprev = NULL; + bh->b_reqnext = NULL; + + init_waitqueue_head(&bh->b_wait); + atomic_set(&bh->b_count, 1); + bh->b_blocknr = sector / (size >> 9); + bh->b_list = PKT_BUF_LIST; + bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req); + +out: + bh->b_rsector = sector; + bh->b_rdev = pd->dev; + return bh; +} + +static void pkt_put_buffer(struct buffer_head *bh) +{ + struct pktcdvd_device *pd = bh->b_private; + unsigned long flags; + + if (bh->b_list != PKT_BUF_LIST) + BUG(); + + if (atomic_read(&bh->b_count)) + printk("pktcdvd: put_buffer: busy buffer\n"); + + bh->b_private = NULL; + bh->b_state = 0; + bh->b_reqnext = NULL; + + spin_lock_irqsave(&pd->lock, flags); + bh->b_next = pd->cdrw.bhlist; + pd->cdrw.bhlist = bh; + spin_unlock_irqrestore(&pd->lock, flags); + atomic_inc(&pd->cdrw.free_bh); + atomic_dec(&pd->cdrw.pending_bh); +} + +/* + * we use this as our default b_end_io handler, since we need to take + * the entire request off the list if just one of the clusters fails. + * later on this should also talk to UDF about relocating blocks -- for + * now we just drop the rq entirely. when doing the relocating we must also + * lock the bh down to ensure that we can easily reconstruct the write should + * it fail. + */ +static void pkt_end_io_write(struct buffer_head *bh, int uptodate) +{ + struct pktcdvd_device *pd = (struct pktcdvd_device *) bh->b_private; + + atomic_set_buffer_clean(bh); + clear_bit(BH_Req, &bh->b_state); + + mark_buffer_uptodate(bh, uptodate); + unlock_buffer(bh); + + if (bh->b_list == PKT_BUF_LIST) { + brelse(bh); + pkt_put_buffer(bh); + } + + /* + * obviously, more needs to be done here. + */ + if (!uptodate) { + printk("pktcdvd: %s: write error\n", pd->name); + set_bit(PACKET_READONLY, &pd->flags); + } + pd->stats.bh_e++; +} + +static void pkt_init_bh(struct pktcdvd_device *pd, struct request *rq) +{ + struct buffer_head *bh = rq->bh; + unsigned cnt = 0; + + while (bh) { +#if 1 + if (bh->b_list == PKT_BUF_LIST) { + bh->b_private = pd; + bh->b_end_io = pkt_end_io_write; + } +#else + bh->b_end_io = pkt_end_io_write; + bh->b_private = pd; +#endif + + /* + * the buffer better be uptodate, mapped, and locked! + */ + if (!buffer_uptodate(bh)) + printk("%lu not uptodate\n", bh->b_rsector); + if (!buffer_locked(bh)) + printk("%lu not locked\n", bh->b_rsector); + if (!buffer_mapped(bh)) + printk("%lu not mapped\n", bh->b_rsector); + + /* + * if this happens, do report + */ + if (bh->b_reqnext) { + if ((bh->b_rsector + (bh->b_size >> 9)) != bh->b_reqnext->b_rsector) + printk("tell jens, %lu follows %lu\n", bh->b_reqnext->b_rsector, bh->b_rsector); + if (bh->b_rsector >= bh->b_reqnext->b_rsector) + + printk("tell jens, order %lu >= %lu\n", bh->b_rsector, bh->b_reqnext->b_rsector); + } + bh = bh->b_reqnext; + cnt += rq->current_nr_sectors; + } + + if (cnt != rq->nr_sectors) { + printk("botched request %u (%lu)\n", cnt, rq->nr_sectors); + BUG(); + } +} + +/* + * really crude stats for now...
+ */ +static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written, + int bs) +{ + pd->stats.bh_s += (written / bs); + pd->stats.secs_w += written; + pd->stats.secs_r += read; +} + +/* + * does request span two packets? 0 == yes, 1 == no + */ +static int pkt_same_zone(struct pktcdvd_device *pd, struct request *rq) +{ + if (!pd->settings.size) + return 0; + + return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd); +} + +#if defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) +static void pkt_init_buffer(struct buffer_head *bh) +{ + set_bit(BH_Uptodate, &bh->b_state); + set_bit(BH_Dirty, &bh->b_state); + memset(bh->b_data, 0, bh->b_size); +} + +static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh) +{ + struct super_block *sb = get_super(pd->pkt_dev); + struct super_operations *sop = sb ? sb->s_op : NULL; + unsigned long packet = 0, blocknr = bh->b_blocknr; + + if (sop && sop->block_empty) { + if (sop->block_empty(sb, blocknr, &packet)) { + pkt_init_buffer(bh); + return 1; + } + } + + return 0; +} + +#else /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */ + +static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh) +{ + return 0; +} + +#endif /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */ + +/* + * basically just does a ll_rw_block for the bhs given to us, but we + * don't return until we have them. + */ +static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh) +{ + /* + * UDF says it's empty, woohoo + */ + if (pkt_sb_empty(pd, bh)) + return; + + atomic_inc(&bh->b_count); + generic_make_request(READ, bh); + VPRINTK("waiting on buffer %lu\n", bh->b_rsector); + lock_buffer(bh); + atomic_dec(&bh->b_count); + VPRINTK("got buffer\n"); + + /* + * read error, whole packet should be remapped + */ + if (!buffer_uptodate(bh)) + printk("huh, %lu not uptodate\n", bh->b_rsector); +} + +inline void __pkt_kill_request(struct request *rq, int uptodate, char *name) +{ + while (end_that_request_first(rq, uptodate, name)) + ; + end_that_request_last(rq); +} + + +void pkt_kill_request(struct request *rq, int uptodate, char *name, char *msg) +{ + printk("pktcdvd: killing request, reason: %s\n", msg); + spin_lock_irq(&io_request_lock); + __pkt_kill_request(rq, uptodate, name); + spin_unlock_irq(&io_request_lock); +} + +/* + * fill in the holes of a request + * + * Returns: 0, keep 'em coming -- 1, stop queueing + */ +static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq) +{ + unsigned long start_s, end_s, sector; + struct buffer_head *bh; + unsigned int sectors; + + /* + * all calculations are done with 512 byte sectors + */ + sectors = pd->settings.size - rq->nr_sectors; + start_s = rq->sector - (rq->sector % pd->settings.size); + end_s = start_s + pd->settings.size; + + VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev)); + VPRINTK("from %lu to %lu ", start_s, end_s); + VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector + + rq->current_nr_sectors); + + if (blksize_size[MAJOR(pd->dev)]) { + if (rq->bh->b_size != blksize_size[MAJOR(pd->dev)][MINOR(pd->dev)]) { + printk("pktcdvd: wrong (%u) block size\n", rq->bh->b_size); + pkt_kill_request(rq, 0, pd->name, "eek"); + pd_lock(pd, 0); + pd->rq = NULL; + pd_unlock(pd); + return 1; + } + } + + /* + * get remaining blocks + */ + bh = rq->bh; + for (sector = start_s; sector < end_s; sector += (bh->b_size >> 9)) { + struct buffer_head *foo_bh; + + if (sector < bh->b_rsector) + goto new; + + if (sector == bh->b_rsector) + continue; + + if (bh->b_reqnext &&
NEXT_BH(bh, bh->b_reqnext)) { + bh = bh->b_reqnext; + continue; + } + + /* + * new buffer -- first search the buffer cache, if it's + * not there grab one from our pool + */ + new: + foo_bh = pkt_get_buffer(pd, sector, bh->b_size); + if (!buffer_uptodate(foo_bh)) + pkt_read_bh(pd, foo_bh); + + if (sector < bh->b_rsector) { + foo_bh->b_reqnext = bh; + rq->bh = foo_bh; + } else { + foo_bh->b_reqnext = bh->b_reqnext; + bh->b_reqnext = foo_bh; + } + bh = foo_bh; + rq->nr_sectors += (bh->b_size >> 9); + if (bh->b_rsector > rq->bhtail->b_rsector) + rq->bhtail = bh; + } + + rq->buffer = rq->bh->b_data; + rq->current_nr_sectors = rq->bh->b_size >> 9; + rq->hard_nr_sectors = rq->nr_sectors; + rq->sector = rq->hard_sector = start_s; + rq->cmd = WRITE_PACKET; + + VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector); + pkt_recheck_segments(rq); + pkt_init_bh(pd, rq); + pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors); + + /* + * sanity check + */ + if (rq->nr_sectors != pd->settings.size) { + printk("pktcdvd: request mismatch %lu (should be %u)\n", + rq->nr_sectors, pd->settings.size); + BUG(); + } + return 0; +} + +static inline void pkt_plug_queue(request_queue_t *q, kdev_t dev) +{ + if (list_empty(&q->queue_head)) + q->plug_device_fn(q, dev); +} + +static void pkt_rq_end_io(struct request *rq) +{ + struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev); + + pd_lock(pd, 1); + + if (pd->rq == NULL) + printk("rq_end_io: no current rq\n"); + + pd->rq = NULL; + rq->end_io = NULL; + + if (!test_and_clear_bit(PACKET_BUSY, &pd->flags)) + printk("rq_end_io: BUSY not set\n"); + + if (!test_and_clear_bit(PACKET_RQ, &pd->flags)) + printk("rq_end_io: RQ not set\n"); + + pd_unlock(pd); + wake_up(&pd->wqueue); +} + +static inline void __pkt_inject_request(request_queue_t *q, struct request *rq) +{ + int lat = elevator_request_latency(&q->elevator, rq->cmd); + struct list_head *head = &q->queue_head; + + pkt_plug_queue(q, rq->rq_dev); + + if (q->head_active && !q->plugged) + head = head->next; + + q->elevator.elevator_fn(rq, &q->elevator, &q->queue_head, head, lat); +} + +static void pkt_inject_request(request_queue_t *q, struct request *rq) +{ + rq->end_io = pkt_rq_end_io; + spin_lock_irq(&io_request_lock); + __pkt_inject_request(q, rq); + spin_unlock_irq(&io_request_lock); +} + +/* + * Returns: 1, keep 'em coming -- 0, wait for wakeup + */ +static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq, + request_queue_t *pdq) +{ + int ret; + + /* + * perfect match. the merge_* functions have already made sure that + * a request doesn't cross a packet boundary, so if the sector + * count matches it's good. + */ + if (rq->nr_sectors == pd->settings.size) { + rq->cmd = WRITE_PACKET; + pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors); + return 0; + } + + /* + * paranoia... + */ + if (rq->nr_sectors > pd->settings.size) { + printk("pktcdvd: request too big! BUG! 
%lu\n", rq->nr_sectors); + BUG(); + } + + ret = pkt_gather_data(pd, rq); + if (ret) { + clear_bit(PACKET_RQ, &pd->flags); + clear_bit(PACKET_BUSY, &pd->flags); + pd_lock(pd, 0); + pd->rq = NULL; + pd_unlock(pd); + } + return ret; +} + +/* + * handle the requests that got queued for this writer + * + * Locks: none + * + */ +static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q) +{ + struct request *rq; + int ret; + + pd_lock(pd, 0); + + /* + * nothing for us to do + */ + if (!test_bit(PACKET_RQ, &pd->flags)) { + pd_unlock(pd); + return 1; + } + + if (test_and_set_bit(PACKET_BUSY, &pd->flags)) { + pd_unlock(pd); + return 1; + } + + rq = pd->rq; + + pd_unlock(pd); + + /* + * nothing to do + */ + ret = 1; + if (rq == NULL) { + printk("handle_queue: pd BUSY+RQ, but no rq\n"); + clear_bit(PACKET_RQ, &pd->flags); + goto out; + } + + /* + * reads are shipped directly to cd-rom, so they should not + * pop up here + */ + if (rq->cmd == READ) + BUG(); + + if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) { + pkt_kill_request(rq, 0, pd->name, "wrong size"); + clear_bit(PACKET_RQ, &pd->flags); + pd->rq = NULL; + goto out; + } + + if (!pkt_do_request(pd, rq, q)) { + pkt_inject_request(q, rq); + return 0; + } + +out: + clear_bit(PACKET_BUSY, &pd->flags); + return ret; +} + +/* + * kpacketd is woken up, when writes have been queued for one of our + * registered devices + */ +static int kcdrwd(void *foobar) +{ + struct pktcdvd_device *pd = foobar; + request_queue_t *q, *my_queue; + + set_bit(PACKET_THREAD, &pd->flags); + daemonize(); + exit_files(current); + + printk("pktcdvd: kernel thread %s started\n", pd->name); + + current->session = 1; + current->pgrp = 1; + current->policy = SCHED_OTHER; + current->nice = -20; + sprintf(current->comm, pd->name); + + spin_lock_irq(¤t->sigmask_lock); + siginitsetinv(¤t->blocked, sigmask(SIGKILL)); + flush_signals(current); + spin_unlock_irq(¤t->sigmask_lock); + + q = blk_get_queue(pd->dev); + my_queue = blk_get_queue(pd->pkt_dev); + + for (;;) { + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&pd->wqueue, &wait); + + /* + * if pkt_handle_queue returns true, we can queue + * another request. otherwise we need to unplug the + * cd-rom queue and wait for buffers to be flushed + * (which will then wake us up again when done). + */ + do { + if (!pkt_handle_queue(pd, q)) + break; + + spin_lock_irq(&io_request_lock); + if (list_empty(&my_queue->queue_head)) { + spin_unlock_irq(&io_request_lock); + break; + } + my_queue->request_fn(my_queue); + spin_unlock_irq(&io_request_lock); + } while (1); + + set_current_state(TASK_INTERRUPTIBLE); + generic_unplug_device(q); + + schedule(); + remove_wait_queue(&pd->wqueue, &wait); + + /* + * got SIGKILL + */ + if (signal_pending(current)) { + printk("pktcdvd: thread got SIGKILL\n"); + break; + } + + } + printk("pktcdvd: kernel thread %s stopped\n", pd->name); + clear_bit(PACKET_THREAD, &pd->flags); + return 0; +} + +/* + * our request function. + * + * - reads are just tossed directly to the device, we don't care. + * - writes, regardless of size, are added as the current pd rq and + * kcdrwd is woken up to handle it. kcdrwd will also make sure to + * reinvoke this request handler, once the given request has been + * processed. 
+ * + * Locks: io_request_lock held + * + * Notes: all writers have their own queue, so all requests are for the + * same device + */ +static void pkt_request(request_queue_t *q) +{ + struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata; + request_queue_t *pdq = NULL; + + if (list_empty(&q->queue_head)) + return; + + pdq = __blk_get_queue(pd->dev); + + while (!list_empty(&q->queue_head)) { + struct request *rq = blkdev_entry_next_request(&q->queue_head); + + rq->rq_dev = pd->dev; + + if (rq->cmd == READ) { + blkdev_dequeue_request(rq); + __pkt_inject_request(pdq, rq); + continue; + } + + /* + * UDF had a bug, where it submitted a write to a ro file + * system, this is just to prevent accidents like that from + * happening again + */ + if (test_bit(PACKET_READONLY, &pd->flags)) { + blkdev_dequeue_request(rq); + __pkt_kill_request(rq, 0, pd->name); + continue; + } + + /* + * paranoia, shouldn't trigger... + */ + if (!pkt_same_zone(pd, rq)) + BUG(); + + pd_lock(pd, 1); + + /* + * already gathering data for another request. the + * rfn will be reinvoked once that is done + */ + if (test_and_set_bit(PACKET_RQ, &pd->flags)) { + pd_unlock(pd); + break; + } + + pd->rq = rq; + pd_unlock(pd); + blkdev_dequeue_request(rq); + } + + wake_up(&pd->wqueue); +} + +static void pkt_print_settings(struct pktcdvd_device *pd) +{ + printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable"); + printk("%u blocks / packet, ", pd->settings.size >> 2); + printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2'); +} + +/* + * A generic sense dump / resolve mechanism should be implemented across + * all ATAPI + SCSI devices. + */ +static void pkt_dump_sense(struct request_sense *sense) +{ + char *info[9] = { "No sense", "Recovered error", "Not ready", + "Medium error", "Hardware error", "Illegal request", + "Unit attention", "Data protect", "Blank check" }; + + if (sense == NULL) + return; + + if (sense->sense_key > 8) { + printk("pktcdvd: sense invalid\n"); + return; + } + + printk("pktcdvd: sense category %s ", info[sense->sense_key]); + printk("asc(%02x), ascq(%02x)\n", sense->asc, sense->ascq); +} + +/* + * write mode select package based on pd->settings + */ +static int pkt_set_write_settings(struct pktcdvd_device *pd) +{ + struct cdrom_device_info *cdi = pd->cdi; + struct cdrom_generic_command cgc; + write_param_page *wp; + char buffer[128]; + int ret, size; + + memset(buffer, 0, sizeof(buffer)); + init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); + if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) + return ret; + + size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff)); + pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); + if (size > sizeof(buffer)) + size = sizeof(buffer); + + /* + * now get it all + */ + init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); + if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) + return ret; + + /* + * write page is offset header + block descriptor length + */ + wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; + + wp->fp = pd->settings.fp; + wp->track_mode = pd->settings.track_mode; + wp->write_type = pd->settings.write_type; + wp->data_block_type = pd->settings.block_mode; + + wp->multi_session = 0; + +#ifdef PACKET_USE_LS + wp->link_size = 7; + wp->ls_v = 1; +#endif + + if (wp->data_block_type == PACKET_BLOCK_MODE1) { + wp->session_format = 0; + wp->subhdr2 = 0x20; + } else if (wp->data_block_type == PACKET_BLOCK_MODE2) { + wp->session_format = 0x20; +
wp->subhdr2 = 8; +#if 0 + wp->mcn[0] = 0x80; + memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1); +#endif + } else { + /* + * paranoia + */ + printk("pktcdvd: write mode wrong %d\n", wp->data_block_type); + return 1; + } + wp->packet_size = cpu_to_be32(pd->settings.size >> 2); + + cgc.buflen = cgc.cmd[8] = size; + if ((ret = cdrom_mode_select(cdi, &cgc))) { + pkt_dump_sense(cgc.sense); + return ret; + } + + pkt_print_settings(pd); + return 0; +} + +/* + * 0 -- we can write to this track, 1 -- we can't + */ +static int pkt_good_track(track_information *ti) +{ + /* + * only good for CD-RW at the moment, not DVD-RW + */ + + /* + * FIXME: only for FP + */ + if (ti->fp == 0) + return 0; + + /* + * "good" settings as per Mt Fuji. + */ + if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1) + return 0; + + if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1) + return 0; + + if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1) + return 0; + + printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + return 1; +} + +/* + * 0 -- we can write to this disc, 1 -- we can't + */ +static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) +{ + /* + * for disc type 0xff we should probably reserve a new track. + * but i'm not sure, should we leave this to user apps? probably. + */ + if (di->disc_type == 0xff) { + printk("pktcdvd: Unknown disc. No track?\n"); + return 1; + } + + if (di->disc_type != 0x20 && di->disc_type != 0) { + printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type); + return 1; + } + + if (di->erasable == 0) { + printk("pktcdvd: Disc not erasable\n"); + return 1; + } + + if (pd->track_status == PACKET_SESSION_RESERVED) { + printk("pktcdvd: Can't write to last track (reserved)\n"); + return 1; + } + + return 0; +} + +static int pkt_probe_settings(struct pktcdvd_device *pd) +{ + disc_information di; + track_information ti; + int ret, track; + + memset(&di, 0, sizeof(disc_information)); + memset(&ti, 0, sizeof(track_information)); + + if ((ret = cdrom_get_disc_info(pd->dev, &di))) { + printk("failed get_disc\n"); + return ret; + } + + pd->disc_status = di.disc_status; + pd->track_status = di.border_status; + + if (pkt_good_disc(pd, &di)) + return -ENXIO; + + printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : ""); + pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; + + track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ + if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) { + printk("pktcdvd: failed get_track\n"); + return ret; + } + + if (pkt_good_track(&ti)) { + printk("pktcdvd: can't write to this track\n"); + return -ENXIO; + } + + /* + * we keep packet size in 512 byte units, makes it easier to + * deal with request calculations. + */ + pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; + if (pd->settings.size == 0) { + printk("pktcdvd: detected zero packet size!\n"); + pd->settings.size = 128; + } + pd->settings.fp = ti.fp; + pd->offset = (be32_to_cpu(ti.track_start) << 2) % pd->settings.size; + + if (ti.nwa_v) { + pd->nwa = be32_to_cpu(ti.next_writable); + set_bit(PACKET_NWA_VALID, &pd->flags); + } + + /* + * in theory we could use lra on -RW media as well and just zero + * blocks that haven't been written yet, but in practice that + * is just a no-go. we'll use that for -R, naturally. 
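A note on units in pkt_probe_settings() above: the drive reports fixed_packet_size and track_start in 2048-byte frames, while the driver does all its bookkeeping in 512-byte sectors, hence the << 2 shifts. With illustrative numbers, a 32-frame packet on a track starting at frame 150:

    pd->settings.size = 32 << 2 = 128 sectors          /* 64KB per packet */
    pd->offset = (150 << 2) % 128 = 600 % 128 = 88     /* zone alignment of the track */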
+ */ + if (ti.lra_v) { + pd->lra = be32_to_cpu(ti.last_rec_address); + set_bit(PACKET_LRA_VALID, &pd->flags); + } else { + pd->lra = 0xffffffff; + set_bit(PACKET_LRA_VALID, &pd->flags); + } + + /* + * fine for now + */ + pd->settings.link_loss = 7; + pd->settings.write_type = 0; /* packet */ + pd->settings.track_mode = ti.track_mode; + + /* + * mode1 or mode2 disc + */ + switch (ti.data_mode) { + case PACKET_MODE1: + pd->settings.block_mode = PACKET_BLOCK_MODE1; + break; + case PACKET_MODE2: + pd->settings.block_mode = PACKET_BLOCK_MODE2; + break; + default: + printk("pktcdvd: unknown data mode\n"); + return 1; + } + return 0; +} + +/* + * enable/disable write caching on drive + */ +static int pkt_write_caching(struct pktcdvd_device *pd, int set) +{ + struct cdrom_generic_command cgc; + unsigned char buf[64]; + int ret; + + memset(buf, 0, sizeof(buf)); + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); + cgc.buflen = pd->mode_offset + 12; + + if ((ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0))) + return ret; + + buf[pd->mode_offset + 10] |= (!!set << 2); + + cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); + if (!(ret = cdrom_mode_select(pd->cdi, &cgc))) + printk("pktcdvd: %sabled write caching on %s\n", set ? "en" : "dis", pd->name); + return ret; +} + +/* + * flush the drive cache to media + */ +static int pkt_flush_cache(struct pktcdvd_device *pd) +{ + struct cdrom_generic_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; + cgc.quiet = 1; + + /* + * the IMMED bit -- we default to not setting it, although that + * would allow a much faster close + */ +#if 0 + cgc.cmd[1] = 1 << 1; +#endif + return pd->cdi->ops->generic_packet(pd->cdi, &cgc); +} + +/* + * Returns drive current write speed + */ +static int pkt_get_speed(struct pktcdvd_device *pd) +{ + struct cdrom_generic_command cgc; + unsigned char buf[64]; + int ret, offset; + + memset(buf, 0, sizeof(buf)); + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); + + ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) { + cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 + + sizeof(struct mode_page_header); + ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) + return ret; + } + + offset = pd->mode_offset + 26; + pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0; + return 0; +} + +/* + * speed is given as the normal factor, e.g. 4 for 4x + */ +static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed) +{ + struct cdrom_generic_command cgc; + unsigned read_speed; + + /* + * we set read and write time so that read spindle speed is one and + * a half as fast as write. although a drive can typically read much + * faster than write, this minimizes the spin up/down when we write + * and gather data. maybe 1/1 factor is faster, needs a bit of testing. + */ + speed = speed * 0xb0; + read_speed = (speed * 3) >> 1; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = 0xbb; + cgc.cmd[2] = (read_speed >> 8) & 0xff; + cgc.cmd[3] = read_speed & 0xff; + cgc.cmd[4] = (speed >> 8) & 0xff; + cgc.cmd[5] = speed & 0xff; + + return pd->cdi->ops->generic_packet(pd->cdi, &cgc); +} + +/* + * Give me full power, Captain + */ +static int pkt_max_speed(struct pktcdvd_device *pd) +{ + disc_information di; + int ret; + + /* + * FIXME: do proper unified cap page, also, this isn't proper + * Mt Fuji, but I think we can safely assume all drives support + * it. 
A hell of a lot more than support the GET_PERFORMANCE + * command (besides, we also use the old set speed command, + * not the streaming feature). + */ + if ((ret = pkt_set_speed(pd, 8))) + return ret; + + /* + * just do something with the disc -- next read will contain the + * maximum speed with this media + */ + if ((ret = cdrom_get_disc_info(pd->dev, &di))) + return ret; + + if ((ret = pkt_get_speed(pd))) { + printk("pktcdvd: failed get speed\n"); + return ret; + } + + DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed); + return 0; +} + +static int pkt_lock_tray(struct pktcdvd_device *pd, int lock) +{ + return pd->cdi->ops->lock_door(pd->cdi, !!lock); +} + +#if 0 +static int pkt_track_capacity(struct pktcdvd_device *pd) +{ + disc_information di; + track_information ti; + int l_track, i, ret; + unsigned long size = 0; + + memset(&di, 0, sizeof(disc_information)); + memset(&ti, 0, sizeof(track_information)); + + if ((ret = cdrom_get_disc_info(pd->dev, &di))) { + DPRINTK("failed get_disc\n"); + return ret; + } + + l_track = di.last_track_lsb | di.last_track_msb >> 8; + DPRINTK("pktcdvd: last track %d\n", l_track); + for (i = di.n_first_track; i <= l_track; i++) { + if ((ret = cdrom_get_track_info(pd->dev, i, 1, &ti))) { + DPRINTK("pktcdvd: failed get_track\n"); + return ret; + } + size += be32_to_cpu(ti.track_size); + } + pkt_sizes[MINOR(pd->pkt_dev)] = size << 1; + return 0; +} + +static int pkt_set_capacity(struct pktcdvd_device *pd) +{ + struct cdrom_generic_command cgc; + struct cdrom_device_info *cdi = pd->cdi; + struct cdvd_capacity cap; + int ret; + + init_cdrom_command(&cgc, &cap, sizeof(cap), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_CDVD_CAPACITY; + if ((ret = cdi->ops->generic_packet(cdi, &cgc))) + return ret; + + /* + * We should probably give up if read capacity fails, since then + * the disc is not ready to be written to -- for now I use + * raw devices and this is fine. + */ + pkt_sizes[MINOR(pd->pkt_dev)] = be32_to_cpu(cap.lba) << 1; + return 0; +} +#endif + +static int pkt_open_write(struct pktcdvd_device *pd) +{ + int ret; + + if ((ret = pkt_probe_settings(pd))) { + DPRINTK("pktcdvd: %s failed probe\n", pd->name); + return -EIO; + } + + if ((ret = pkt_set_write_settings(pd))) { + DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name); + return -EIO; + } + + (void) pkt_write_caching(pd, USE_WCACHING); + + if ((ret = pkt_max_speed(pd))) { + DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name); + return -EIO; + } + return 0; +} + +/* + * called at open time. return 1 if the device can only be opened read-only. + */ +static int pkt_open_dev(struct pktcdvd_device *pd, int write) +{ + int ret; + long lba; + + if (!pd->dev) + return 0; + + if ((ret = cdrom_get_last_written(pd->dev, &lba))) + return ret; + + pkt_sizes[MINOR(pd->pkt_dev)] = 1 + (lba << 1); + + if (write) { + if ((ret = pkt_open_write(pd))) + return ret; + clear_bit(PACKET_READONLY, &pd->flags); + } else { + if ((ret = pkt_max_speed(pd))) + return ret; + set_bit(PACKET_READONLY, &pd->flags); + } + + if (write) + printk("pktcdvd: %luKB available on disc\n", lba << 1); + return 0; +} + +/* + * called when the device is closed. makes sure that the device flushes + * the internal cache before we close.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+	fsync_dev(pd->pkt_dev);
+	invalidate_buffers(pd->pkt_dev);
+
+	if (flush)
+		if (pkt_flush_cache(pd))
+			DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
+
+	atomic_dec(&pd->refcnt);
+}
+
+static int pkt_open(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = NULL;
+	int ret = 0;
+
+	VPRINTK("pktcdvd: entering open\n");
+
+	MOD_INC_USE_COUNT;
+
+	/*
+	 * should this really be necessary??
+	 */
+	if (!inode) {
+		MOD_DEC_USE_COUNT;
+		return -EINVAL;
+	}
+
+	if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		MOD_DEC_USE_COUNT;
+		return -ENODEV;
+	}
+
+	/*
+	 * either device is not configured, or pktsetup is old and doesn't
+	 * use O_CREAT to create device
+	 */
+	pd = &pkt_devs[MINOR(inode->i_rdev)];
+	if (!pd->dev && !(file->f_flags & O_CREAT)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = -EBUSY;
+	atomic_inc(&pd->refcnt);
+	if ((atomic_read(&pd->refcnt) > 1) && (file->f_mode & FMODE_WRITE))
+		goto out_dec;
+
+	ret = -EIO;
+	if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE))
+		goto out_dec;
+
+	/*
+	 * needed here as well, since ext2 (among others) may change
+	 * the blocksize at mount time
+	 */
+	set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
+	return 0;
+
+out_dec:
+	atomic_dec(&pd->refcnt);
+out:
+	VPRINTK("pktcdvd: failed open\n");
+	MOD_DEC_USE_COUNT;
+	return ret;
+}
+
+static int pkt_close(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+	int ret = 0;
+
+	if (pd->dev)
+		pkt_release_dev(pd, 1);
+	MOD_DEC_USE_COUNT;
+	return ret;
+}
+
+/*
+ * pktcdvd i/o elevator
+ *
+ * rules: always merge whenever possible, and support hole merges
+ */
+static void pkt_elevator(struct request *rq, elevator_t *elevator,
+			 struct list_head *real_head, struct list_head *head,
+			 int orig_latency)
+{
+	struct list_head *entry = real_head;
+	struct request *tmp;
+	int pass = 0;
+
+	while ((entry = entry->prev) != head) {
+		tmp = blkdev_entry_to_request(entry);
+		if (IN_ORDER(tmp, rq) ||
+		    (pass && !IN_ORDER(tmp, blkdev_next_request(tmp))))
+			break;
+		pass = 1;
+	}
+	list_add(&rq->queue, entry);
+}
+
+static int pkt_elevator_merge(request_queue_t *q, struct request **rq,
+			      struct list_head *head, struct buffer_head *bh,
+			      int rw, int max_secs, int max_segs)
+{
+	unsigned int count = bh->b_size >> 9;
+	int ret = ELEVATOR_NO_MERGE;
+	struct list_head *entry;
+
+	entry = &q->queue_head;
+	while ((entry = entry->prev) != head && !ret) {
+		struct request *__rq = *rq = blkdev_entry_to_request(entry);
+		if (__rq->sem)
+			continue;
+		if (__rq->cmd != rw)
+			continue;
+		if (__rq->nr_sectors + count > max_secs)
+			continue;
+		if (__rq->rq_dev != bh->b_rdev)
+			continue;
+		if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
+			ret = ELEVATOR_BACK_MERGE;
+		else if (__rq->sector - count == bh->b_rsector)
+			ret = ELEVATOR_FRONT_MERGE;
+		else if (q->hole_merge_fn(q, __rq, bh, max_segs))
+			ret = ELEVATOR_HOLE_MERGE;
+	}
+	return ret;
+}
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+	request_queue_t *q = &pd->cdrw.r_queue;
+
+	blk_init_queue(q, pkt_request);
+	blk_queue_headactive(q, 0);
+	elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
+	q->front_merge_fn = pkt_front_merge_fn;
+	q->back_merge_fn = pkt_back_merge_fn;
+	q->hole_merge_fn = pkt_hole_merge_fn;
+	q->merge_requests_fn = pkt_merge_requests_fn;
+	q->queuedata = pd;
+}
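
/*
 * Editor's sketch, not part of the patch: a tiny user-space model of the
 * merge classification pkt_elevator_merge() above performs. The toy_rq
 * struct is a stand-in for the kernel's struct request; sector counts
 * are 512-byte units, as in the driver.
 */
#include <stdio.h>

struct toy_rq {
	unsigned long sector;		/* first sector of the request */
	unsigned long nr_sectors;	/* length in sectors */
};

static const char *classify(const struct toy_rq *rq, unsigned long sec,
			    unsigned long count)
{
	if (rq->sector + rq->nr_sectors == sec)
		return "back merge";		/* ELEVATOR_BACK_MERGE */
	if (rq->sector == sec + count)
		return "front merge";		/* ELEVATOR_FRONT_MERGE */
	return "hole merge candidate";		/* ELEVATOR_HOLE_MERGE */
}

int main(void)
{
	struct toy_rq rq = { 128, 4 };	/* covers sectors 128..131 */

	printf("%s\n", classify(&rq, 132, 4));	/* back merge */
	printf("%s\n", classify(&rq, 124, 4));	/* front merge */
	printf("%s\n", classify(&rq, 140, 4));	/* hole merge candidate */
	return 0;
}
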
+
+static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev)
+{
+	struct cdrom_device_info *cdi;
+	request_queue_t *q;
+	int i;
+
+	for (i = 0; i < MAX_WRITERS; i++) {
+		if (pkt_devs[i].dev == dev) {
+			printk("pktcdvd: %s already setup\n", kdevname(dev));
+			return -EBUSY;
+		}
+	}
+
+	for (i = 0; i < MAX_WRITERS; i++)
+		if (pd == &pkt_devs[i])
+			break;
+
+	if (i == MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		return -ENXIO;
+	}
+
+	cdi = cdrom_find_device(dev);
+	if (cdi == NULL) {
+		printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
+		return -ENXIO;
+	}
+
+	MOD_INC_USE_COUNT;
+
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	atomic_set(&pd->cdrw.pending_bh, 0);
+	atomic_set(&pd->cdrw.free_bh, 0);
+	spin_lock_init(&pd->lock);
+	if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
+		printk("pktcdvd: not enough memory for buffers\n");
+		MOD_DEC_USE_COUNT;
+		return -ENOMEM;
+	}
+	set_blocksize(dev, CD_FRAMESIZE);
+	pd->cdi = cdi;
+	pd->dev = dev;
+	pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
+	sprintf(pd->name, "pktcdvd%d", i);
+	atomic_set(&pd->refcnt, 0);
+	init_waitqueue_head(&pd->wqueue);
+	init_waitqueue_head(&pd->lock_wait);
+
+	/*
+	 * store device merge functions (SCSI uses their own to build
+	 * scatter-gather tables)
+	 */
+	q = blk_get_queue(dev);
+	spin_lock_irq(&io_request_lock);
+	pkt_init_queue(pd);
+	pd->cdrw.front_merge_fn = q->front_merge_fn;
+	pd->cdrw.back_merge_fn = q->back_merge_fn;
+	pd->cdrw.merge_requests_fn = q->merge_requests_fn;
+	pd->cdrw.queuedata = q->queuedata;
+	spin_unlock_irq(&io_request_lock);
+
+	pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+
+	DPRINTK("pktcdvd: writer %s successfully registered\n", cdi->name);
+	return 0;
+}
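
/*
 * Editor's sketch of the user-space side of PACKET_SETUP_DEV, i.e. what a
 * pktsetup-style tool would do to bind a CD-ROM to a packet device. The
 * device paths are examples, the ioctl constants are assumed to come from
 * the pktcdvd header this patch adds, and error handling is trimmed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pktcdvd.h>

int setup_packet_dev(const char *pkt_path, const char *cd_path)
{
	int pkt_fd, cd_fd;

	/* O_CREAT tells pkt_open() above that this minor is being set up */
	pkt_fd = open(pkt_path, O_RDONLY | O_CREAT, 0);
	cd_fd = open(cd_path, O_RDWR | O_NONBLOCK);
	if (pkt_fd < 0 || cd_fd < 0)
		return -1;

	/* arg is the file descriptor of the CD-ROM device, see below */
	if (ioctl(pkt_fd, PACKET_SETUP_DEV, cd_fd) < 0)
		perror("PACKET_SETUP_DEV");

	close(cd_fd);	/* pkt_setup_dev() takes its own references */
	close(pkt_fd);
	return 0;
}
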
+
+/*
+ * arg contains file descriptor of CD-ROM device.
+ */
+static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
+{
+	struct inode *inode;
+	struct file *file;
+	int ret;
+
+	if ((file = fget(arg)) == NULL) {
+		printk("pktcdvd: bad file descriptor passed\n");
+		return -EBADF;
+	}
+
+	ret = -EINVAL;
+	if ((inode = file->f_dentry->d_inode) == NULL) {
+		printk("pktcdvd: huh? file descriptor contains no inode?\n");
+		goto out;
+	}
+	ret = -ENOTBLK;
+	if (!S_ISBLK(inode->i_mode)) {
+		printk("pktcdvd: device is not a block device (duh)\n");
+		goto out;
+	}
+	ret = blkdev_get(inode->i_bdev, file->f_mode, file->f_flags, BDEV_FILE);
+	if (ret)
+		goto out;
+	ret = -EROFS;
+	if (IS_RDONLY(inode)) {
+		printk("pktcdvd: Can't write to read-only dev\n");
+		goto out_bdev;
+	}
+	if ((ret = pkt_new_dev(pd, inode->i_rdev))) {
+		printk("pktcdvd: all booked up\n");
+		goto out_bdev;
+	}
+
+	pd->pkt_dentry = dget(file->f_dentry);
+	atomic_inc(&pd->refcnt);
+
+	if ((ret = pkt_lock_tray(pd, 1)))
+		printk("pktcdvd: can't lock drive tray\n");
+
+	goto out;
+
+out_bdev:
+	blkdev_put(inode->i_bdev, BDEV_FILE);
+out:
+	fput(file);
+	return ret;
+}
+
+static int pkt_remove_dev(struct pktcdvd_device *pd)
+{
+	int ret;
+
+	/*
+	 * will also invalidate buffers for CD-ROM
+	 */
+	blkdev_put(pd->pkt_dentry->d_inode->i_bdev, BDEV_FILE);
+	dput(pd->pkt_dentry);
+	invalidate_buffers(pd->pkt_dev);
+
+	/*
+	 * Unlock CD-ROM device
+	 */
+	(void) pkt_lock_tray(pd, 0);
+
+	if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
+		printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
+
+	if ((ret = kill_proc(pd->cdrw.pid, SIGKILL, 1)) == 0) {
+		int count = 10;
+		while (test_bit(PACKET_THREAD, &pd->flags) && --count) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ / 10);
+		}
+		if (!count)
+			printk("pkt_exit: can't kill kernel thread\n");
+	}
+
+	blk_cleanup_queue(&pd->cdrw.r_queue);
+	DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+static int pkt_media_change(kdev_t dev)
+{
+	struct pktcdvd_device *pd = pkt_find_dev(dev);
+	struct cdrom_device_info *cdi;
+
+	if (pd == NULL)
+		return 0;
+
+	cdi = pd->cdi;
+	return cdi->ops->dev_ioctl(cdi, CDROM_MEDIA_CHANGED, CDSL_CURRENT);
+}
+
+static int pkt_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+
+	if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
+		DPRINTK("pktcdvd: dev not setup\n");
+		return -ENXIO;
+	}
+
+	switch (cmd) {
+	case PACKET_GET_STATS:
+		if (copy_to_user((struct packet_stats *)arg, &pd->stats,
+				 sizeof(struct packet_stats)))
+			return -EFAULT;
+		return 0;
+
+	case PACKET_SETUP_DEV:
+		if (pd->dev) {
+			printk("pktcdvd: dev already setup\n");
+			return -EBUSY;
+		}
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		return pkt_setup_dev(pd, arg);
+
+	case PACKET_TEARDOWN_DEV:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (atomic_read(&pd->refcnt) != 1)
+			return -EBUSY;
+		return pkt_remove_dev(pd);
+
+	case PACKET_WAKEUP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		wake_up(&pd->wqueue);
+		return 0;
+
+	case BLKGETSIZE:
+		return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (long *)arg);
+
+	case BLKROSET:
+	case BLKROGET:
+	case BLKSSZGET:
+	case BLKRASET:
+	case BLKRAGET:
+	case BLKFLSBUF:
+		return blk_ioctl(inode->i_rdev, cmd, arg);
+
+	/*
+	 * forward selected CDROM ioctls to CD-ROM, for UDF
+	 */
+	case CDROMMULTISESSION:
+	case CDROMREADTOCENTRY:
+	case CDROM_LAST_WRITTEN:
+	case CDROM_SEND_PACKET:
+		return pd->cdi->ops->dev_ioctl(pd->cdi, cmd, arg);
+
+	default:
+		printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static struct block_device_operations pktcdvd_ops = {
+	open:			pkt_open,
+	release:		pkt_close,
+	ioctl:			pkt_ioctl,
+	check_media_change:	pkt_media_change,
+};
+
+static int list_nr_items(struct pktcdvd_device *pd, struct list_head *head,
+			 spinlock_t
*lock) +{ + struct list_head *foo; + int i; + + spin_lock_irq(lock); + if (list_empty(head)) { + spin_unlock_irq(lock); + return 0; + } + + i = 0; + list_for_each(foo, head) + i++; + + spin_unlock_irq(lock); + return i; +} + +static int pkt_proc_device(struct pktcdvd_device *pd, char *buf) +{ + char *b = buf, *msg; + + b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev)); + + b += sprintf(b, "\nSettings:\n"); + b += sprintf(b, "\tpacket size:\t\t%dKB\n", pd->settings.size / 2); + + if (pd->settings.write_type == 0) + msg = "Packet"; + else + msg = "Unknown"; + b += sprintf(b, "\twrite type:\t\t%s\n", msg); + + b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); + b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss); + + b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); + + if (pd->settings.block_mode == PACKET_BLOCK_MODE1) + msg = "Mode 1"; + else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) + msg = "Mode 2"; + else + msg = "Unknown"; + b += sprintf(b, "\tblock mode:\t\t%s\n", msg); + + b += sprintf(b, "\nStatistics:\n"); + b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s); + b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e); + b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w); + b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r); + + b += sprintf(b, "\nMisc:\n"); + b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt)); + b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags); + b += sprintf(b, "\twrite speed:\t\t%uKB/sec\n", pd->speed * 150); + b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset); + b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset); + + b += sprintf(b, "\nQueue state:\n"); + b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh)); + b += sprintf(b, "\tpending buffers:\t%u\n", atomic_read(&pd->cdrw.pending_bh)); + + b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? 
"yes" : "no"); + b += sprintf(b, "\tqueue requests:\t\t%u\n", list_nr_items(pd, &pd->cdrw.r_queue.queue_head, &io_request_lock)); + + return b - buf; +} + +static int pkt_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct pktcdvd_device *pd; + char *buf = page; + int len, i; + + len = sprintf(buf, "%s\n", VERSION_CODE); + buf += len; + + for (i = 0; i < MAX_WRITERS; i++) { + pd = &pkt_devs[i]; + if (pd->dev) { + len += pkt_proc_device(pd, buf); + buf += len; + } + } + + if (len <= off + count) + *eof = 1; + + *start = page + off; + len -= off; + if (len > count) + len = count; + if (len < 0) + len = 0; + + return len; +} + +int __init pkt_init(void) +{ + devfs_register(NULL, "pktcdvd", 0, DEVFS_FL_DEFAULT, PACKET_MAJOR, + S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL); + if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) { + printk("unable to register pktcdvd device\n"); + return -EIO; + } + + pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL); + if (pkt_sizes == NULL) + goto err; + + pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL); + if (pkt_blksize == NULL) + goto err; + + pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL); + if (pkt_readahead == NULL) + goto err; + + pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL); + if (pkt_devs == NULL) + goto err; + + memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device)); + memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int)); + memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int)); + memset(pkt_readahead, 0, MAX_WRITERS * sizeof(int)); + + blk_size[PACKET_MAJOR] = pkt_sizes; + blksize_size[PACKET_MAJOR] = pkt_blksize; + max_readahead[PACKET_MAJOR] = pkt_readahead; + read_ahead[PACKET_MAJOR] = 128; + set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE); + + blk_dev[PACKET_MAJOR].queue = pkt_return_queue; + + create_proc_read_entry("driver/pktcdvd", 0, 0, pkt_read_proc, NULL); + + DPRINTK("pktcdvd: %s\n", VERSION_CODE); + return 0; + +err: + printk("pktcdvd: out of memory\n"); + devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0, + DEVFS_SPECIAL_BLK, 0)); + devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd"); + kfree(pkt_devs); + kfree(pkt_sizes); + kfree(pkt_blksize); + kfree(pkt_readahead); + return -ENOMEM; +} + +void __exit pkt_exit(void) +{ + devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0, + DEVFS_SPECIAL_BLK, 0)); + devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd"); + + remove_proc_entry("driver/pktcdvd", NULL); + kfree(pkt_sizes); + kfree(pkt_blksize); + kfree(pkt_devs); + kfree(pkt_readahead); +} + +MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); +MODULE_AUTHOR("Jens Axboe "); + +module_init(pkt_init); +module_exit(pkt_exit); diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c --- /opt/kernel/linux-2.4.0-test10/drivers/cdrom/cdrom.c Mon Oct 30 08:41:32 2000 +++ linux/drivers/cdrom/cdrom.c Mon Oct 23 01:31:44 2000 @@ -224,11 +224,14 @@ 3.11 Jun 12, 2000 - Jens Axboe -- Fix bug in getting rpc phase 2 region info. 
diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c
--- /opt/kernel/linux-2.4.0-test10/drivers/cdrom/cdrom.c	Mon Oct 30 08:41:32 2000
+++ linux/drivers/cdrom/cdrom.c	Mon Oct 23 01:31:44 2000
@@ -224,11 +224,14 @@
    3.11 Jun 12, 2000 - Jens Axboe
 	-- Fix bug in getting rpc phase 2 region info.
 	-- Reinstate "correct" CDROMPLAYTRKIND
-
+
+  3.12 Oct 18, 2000 - Jens Axboe
+	-- Use quiet bit on packet commands not known to work
+
 -------------------------------------------------------------------------*/
 
-#define REVISION "Revision: 3.11"
-#define VERSION "Id: cdrom.c 3.11 2000/06/12"
+#define REVISION "Revision: 3.12"
+#define VERSION "Id: cdrom.c 3.12 2000/10/18"
 
 /* I use an error-log mask to give fine grain control over the type of
    messages dumped to the system logs.  The available masks include: */
@@ -953,6 +956,7 @@
 	cgc->buffer = (char *) buf;
 	cgc->buflen = len;
 	cgc->data_direction = type;
+	cgc->timeout = 5*HZ;
 }
 
 /* DVD handling */
@@ -1860,6 +1864,48 @@
 	return cdo->generic_packet(cdi, &cgc);
 }
 
+static int cdrom_do_cmd(struct cdrom_device_info *cdi,
+			struct cdrom_generic_command *cgc)
+{
+	struct request_sense *usense, sense;
+	unsigned char *ubuf;
+	int ret;
+
+	if (cgc->data_direction == CGC_DATA_UNKNOWN)
+		return -EINVAL;
+
+	if (cgc->buflen < 0 || cgc->buflen >= 131072)
+		return -EINVAL;
+
+	if ((ubuf = cgc->buffer)) {
+		cgc->buffer = kmalloc(cgc->buflen, GFP_KERNEL);
+		if (cgc->buffer == NULL)
+			return -ENOMEM;
+	}
+
+	usense = cgc->sense;
+	cgc->sense = &sense;
+	if (usense && !access_ok(VERIFY_WRITE, usense, sizeof(*usense))) {
+		kfree(cgc->buffer);
+		return -EFAULT;
+	}
+
+	if (cgc->data_direction == CGC_DATA_READ) {
+		if (!access_ok(VERIFY_WRITE, ubuf, cgc->buflen)) {
+			kfree(cgc->buffer);
+			return -EFAULT;
+		}
+	} else if (cgc->data_direction == CGC_DATA_WRITE) {
+		if (copy_from_user(cgc->buffer, ubuf, cgc->buflen)) {
+			kfree(cgc->buffer);
+			return -EFAULT;
+		}
+	}
+
+	ret = cdi->ops->generic_packet(cdi, cgc);
+	if (usense)
+		__copy_to_user(usense, cgc->sense, sizeof(*usense));
+	if (!ret && cgc->data_direction == CGC_DATA_READ)
+		__copy_to_user(ubuf, cgc->buffer, cgc->buflen);
+	kfree(cgc->buffer);
+	return ret;
+}
+
 static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
 		     unsigned long arg)
 {
@@ -2117,52 +2163,11 @@
 	}
 
 	case CDROM_SEND_PACKET: {
-		__u8 *userbuf, copy = 0;
-		struct request_sense *sense;
 		if (!CDROM_CAN(CDC_GENERIC_PACKET))
 			return -ENOSYS;
 		cdinfo(CD_DO_IOCTL, "entering CDROM_SEND_PACKET\n");
 		IOCTL_IN(arg, struct cdrom_generic_command, cgc);
-		copy = !!cgc.buflen;
-		userbuf = cgc.buffer;
-		cgc.buffer = NULL;
-		sense = cgc.sense;
-		if (userbuf != NULL && copy) {
-			/* usually commands just copy data one way, i.e.
-			 * we send a buffer to the drive and the command
-			 * specifies whether the drive will read or
-			 * write to that buffer. usually the buffers
-			 * are very small, so we don't loose that much
-			 * by doing a redundant copy each time.
*/ - if (!access_ok(VERIFY_WRITE, userbuf, cgc.buflen)) { - printk("can't get write perms\n"); - return -EFAULT; - } - if (!access_ok(VERIFY_READ, userbuf, cgc.buflen)) { - printk("can't get read perms\n"); - return -EFAULT; - } - } - /* reasonable limits */ - if (cgc.buflen < 0 || cgc.buflen > 131072) { - printk("invalid size given\n"); - return -EINVAL; - } - if (copy) { - cgc.buffer = kmalloc(cgc.buflen, GFP_KERNEL); - if (cgc.buffer == NULL) - return -ENOMEM; - __copy_from_user(cgc.buffer, userbuf, cgc.buflen); - } - ret = cdo->generic_packet(cdi, &cgc); - if (copy && !ret) - __copy_to_user(userbuf, cgc.buffer, cgc.buflen); - /* copy back sense data */ - if (sense != NULL) - if (copy_to_user(sense, cgc.sense, sizeof(struct request_sense))) - ret = -EFAULT; - kfree(cgc.buffer); - return ret; + return cdrom_do_cmd(cdi, &cgc); } case CDROM_NEXT_WRITABLE: { long next = 0; @@ -2199,6 +2204,7 @@ cgc.cmd[4] = (track & 0xff00) >> 8; cgc.cmd[5] = track & 0xff; cgc.cmd[8] = 8; + cgc.quiet = 1; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; @@ -2220,6 +2226,7 @@ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); cgc.cmd[0] = GPCMD_READ_DISC_INFO; cgc.cmd[8] = cgc.buflen = 2; + cgc.quiet = 1; if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; @@ -2250,9 +2257,6 @@ int ret = -1; if (!CDROM_CAN(CDC_GENERIC_PACKET)) - goto use_toc; - - if (!CDROM_CAN(CDC_CD_R | CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM)) goto use_toc; if ((ret = cdrom_get_disc_info(dev, &di))) diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/i2o/i2o_block.c linux/drivers/i2o/i2o_block.c --- /opt/kernel/linux-2.4.0-test10/drivers/i2o/i2o_block.c Thu Jul 6 19:24:51 2000 +++ linux/drivers/i2o/i2o_block.c Mon Oct 23 01:31:44 2000 @@ -392,7 +392,6 @@ if (req->nr_segments < max_segments) { req->nr_segments++; - q->elevator.nr_segments++; return 1; } return 0; @@ -436,7 +435,6 @@ if (total_segments > max_segments) return 0; - q->elevator.nr_segments -= same_segment; req->nr_segments = total_segments; return 1; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c --- /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-cd.c Tue Sep 5 13:46:15 2000 +++ linux/drivers/ide/ide-cd.c Mon Oct 23 01:31:44 2000 @@ -285,9 +285,11 @@ * 4.58 May 1, 2000 - Clean up ACER50 stuff. * - Fix small problem with ide_cdrom_capacity * + * 4.99 - Added write support for packet writing. 
+ * *************************************************************************/ -#define IDECD_VERSION "4.58" +#define IDECD_VERSION "4.99" #include #include @@ -324,7 +326,6 @@ info->nsectors_buffered = 0; } - static void cdrom_analyze_sense_data(ide_drive_t *drive, struct packet_command *failed_command, @@ -348,6 +349,9 @@ return; } + if (failed_command->quiet) + return; + if (sense->error_code == 0x70 && sense->sense_key == 0x02 && ((sense->asc == 0x3a && sense->ascq == 0x00) || (sense->asc == 0x04 && sense->ascq == 0x01))) @@ -363,7 +367,7 @@ /* * If a read toc is executed for a CD-R or CD-RW medium where * the first toc has not been recorded yet, it will fail with - * 05/24/00 (which is a confusing error) + * 05/24/00 */ if (failed_command && failed_command->c[0] == GPCMD_READ_TOC_PMA_ATIP) if (sense->sense_key == 0x05 && sense->asc == 0x24) @@ -533,8 +537,10 @@ (struct packet_command *) pc->sense, (struct request_sense *) (pc->buffer - pc->c[4])); } - if (rq->cmd == READ && !rq->current_nr_sectors) - uptodate = 1; + + if (rq->cmd == READ || rq->cmd == WRITE_PACKET) + if (!rq->current_nr_sectors) + uptodate = 1; ide_end_request (uptodate, HWGROUP(drive)); } @@ -542,8 +548,8 @@ /* Returns 0 if the request should be continued. Returns 1 if the request was ended. */ -static int cdrom_decode_status (ide_startstop_t *startstop, ide_drive_t *drive, - int good_stat, int *stat_ret) +static int cdrom_decode_status(ide_startstop_t *startstop, ide_drive_t *drive, + int good_stat, int *stat_ret) { struct request *rq = HWGROUP(drive)->rq; int stat, err, sense_key; @@ -576,13 +582,13 @@ cdrom_end_request (1, drive); *startstop = ide_error (drive, "request sense failure", stat); return 1; - } else if (rq->cmd == PACKET_COMMAND) { /* All other functions, except for READ. */ + struct semaphore *sem = NULL; pc = (struct packet_command *) rq->buffer; - /* Check for tray open. */ + /* Check for tray open. */ if (sense_key == NOT_READY) { cdrom_saw_media_change (drive); } else if (sense_key == UNIT_ATTENTION) { @@ -590,9 +596,10 @@ cdrom_saw_media_change (drive); /*printk("%s: media changed\n",drive->name);*/ return 0; - } else { + } else if (!pc->quiet) { /* Otherwise, print an error. */ - ide_dump_status(drive, "packet command error", stat); + ide_dump_status (drive, "packet command error", + stat); } /* Set the error flag and complete the request. @@ -613,9 +620,10 @@ cdrom_end_request (1, drive); if ((stat & ERR_STAT) != 0) - cdrom_queue_request_sense(drive, sem, pc->sense, pc); + cdrom_queue_request_sense(drive, sem, pc->sense, + pc); } else { - /* Handle errors from READ requests. */ + /* Handle errors from READ and WRITE requests. */ if (sense_key == NOT_READY) { /* Tray open. */ @@ -666,11 +674,22 @@ struct packet_command *pc = (struct packet_command *) rq->buffer; unsigned long wait = 0; - /* blank and format can take an extremly long time to - * complete, if the IMMED bit was not set. + /* + * Some commands are *slow* and normally take a long time to + * complete. Usually we can use the ATAPI "disconnect" to bypass + * this, but not all commands/drives support that. Let + * ide_timer_expiry keep polling us for these. 
*/ - if (pc->c[0] == GPCMD_BLANK || pc->c[0] == GPCMD_FORMAT_UNIT) - wait = 60*60*HZ; + switch (pc->c[0]) { + case GPCMD_BLANK: + case GPCMD_FORMAT_UNIT: + case GPCMD_RESERVE_RZONE_TRACK: + wait = WAIT_CMD; + break; + default: + wait = 0; + break; + } return wait; } @@ -684,7 +703,8 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, int xferlen, - ide_handler_t *handler) + ide_handler_t *handler, + int cmd) { ide_startstop_t startstop; struct cdrom_info *info = drive->driver_data; @@ -693,8 +713,15 @@ if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) return startstop; - if (info->dma) - info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive); + if (info->dma) { + if (cmd == READ) { + info->dma = !HWIF(drive)->dmaproc(ide_dma_read, drive); + } else if (cmd == WRITE) { + info->dma = !HWIF(drive)->dmaproc(ide_dma_write, drive); + } else { + printk("ide-cd: DMA set, but not allowed\n"); + } + } /* Set up the controller registers. */ OUT_BYTE (info->dma, IDE_FEATURE_REG); @@ -724,31 +751,32 @@ by cdrom_start_packet_command. HANDLER is the interrupt handler to call when the command completes or there's data ready. */ -static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive, +static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, unsigned char *cmd_buf, int cmd_len, - ide_handler_t *handler) + ide_handler_t *handler, + unsigned timeout) { + ide_startstop_t startstop; + if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) { /* Here we should have been called after receiving an interrupt from the device. DRQ should how be set. */ int stat_dum; - ide_startstop_t startstop; /* Check for errors. */ if (cdrom_decode_status (&startstop, drive, DRQ_STAT, &stat_dum)) return startstop; } else { - ide_startstop_t startstop; /* Otherwise, we must wait for DRQ to get set. */ if (ide_wait_stat (&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) return startstop; } /* Arm the interrupt handler. */ - ide_set_handler (drive, handler, WAIT_CMD, cdrom_timer_expiry); + ide_set_handler(drive, handler, timeout, cdrom_timer_expiry); /* Send the command to the device. */ - atapi_output_bytes (drive, cmd_buf, cmd_len); + atapi_output_bytes(drive, cmd_buf, cmd_len); return ide_started; } @@ -840,8 +868,8 @@ drive->name, ireason); } - cdrom_end_request (0, drive); - return -1; + cdrom_end_request(0, drive); + return 1; } /* @@ -1082,7 +1110,7 @@ (65534 / CD_FRAMESIZE) : 65535); /* Set up the command */ - memset (&pc.c, 0, sizeof (pc.c)); + memset(&pc.c, 0, sizeof (pc.c)); pc.c[0] = GPCMD_READ_10; pc.c[7] = (nframes >> 8); pc.c[8] = (nframes & 0xff); @@ -1090,7 +1118,7 @@ /* Send the command to the drive and return. 
*/ return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c), - &cdrom_read_intr); + &cdrom_read_intr, WAIT_CMD); } @@ -1133,7 +1161,8 @@ memset (&pc.c, 0, sizeof (pc.c)); pc.c[0] = GPCMD_SEEK; put_unaligned(cpu_to_be32(frame), (unsigned int *) &pc.c[2]); - return cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c), &cdrom_seek_intr); + return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c), + &cdrom_seek_intr, WAIT_CMD); } static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block) @@ -1142,11 +1171,13 @@ info->dma = 0; info->start_seek = jiffies; - return cdrom_start_packet_command (drive, 0, cdrom_start_seek_continuation); + return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation, 0); } -/* Fix up a possibly partially-processed request so that we can - start it over entirely, or even put it back on the request queue. */ +/* + * Fix up a possibly partially-processed request so that we can + * start it over entirely + */ static void restore_request (struct request *rq) { if (rq->buffer != rq->bh->b_data) { @@ -1194,7 +1225,7 @@ info->dma = 0; /* Start sending the read request to the drive. */ - return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation); + return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation, READ); } /**************************************************************************** @@ -1308,9 +1339,12 @@ struct request *rq = HWGROUP(drive)->rq; struct packet_command *pc = (struct packet_command *)rq->buffer; + if (!pc->timeout) + pc->timeout = WAIT_CMD; + /* Send the command to the drive and return. */ - return cdrom_transfer_packet_command (drive, pc->c, - sizeof (pc->c), &cdrom_pc_intr); + return cdrom_transfer_packet_command(drive, pc->c, sizeof(pc->c), + &cdrom_pc_intr, pc->timeout); } @@ -1326,7 +1360,7 @@ len = pc->buflen; /* Start sending the command to the drive. */ - return cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation); + return cdrom_start_packet_command(drive, len, cdrom_do_pc_continuation, 0); } @@ -1335,7 +1369,7 @@ static void cdrom_sleep (int time) { - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(time); } @@ -1387,16 +1421,182 @@ return pc->stat ? -EIO : 0; } +/* + * Write handling + */ +static inline int cdrom_write_check_ireason(ide_drive_t *drive, int len, + int ireason) +{ + /* Two notes about IDE interrupt reason here - 0 means that + * the drive wants to receive data from us, 2 means that + * the drive is expecting data from us. + */ + ireason &= 3; + + if (ireason == 2) { + /* Whoops... The drive wants to send data. */ + printk("%s: cdrom_write_intr: wrong transfer direction!\n", + drive->name); + + /* Throw some data at the drive so it doesn't hang + and quit this request. */ + while (len > 0) { + int dum = 0; + atapi_output_bytes(drive, &dum, sizeof(dum)); + len -= sizeof(dum); + } + } else { + /* Drive wants a command packet, or invalid ireason... */ + printk("%s: cdrom_write_intr: bad interrupt reason %d\n", + drive->name, ireason); + } + + cdrom_end_request(0, drive); + return 1; +} + +static ide_startstop_t cdrom_write_intr(ide_drive_t *drive) +{ + int stat, ireason, len, sectors_to_transfer; + struct cdrom_info *info = drive->driver_data; + int i, dma_error = 0, dma = info->dma; + ide_startstop_t startstop; + + struct request *rq = HWGROUP(drive)->rq; + + /* Check for errors. 
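 */

	/*
	 * Worked example (editor's note, not driver code): for the fixed
	 * 32-frame packets this path writes, a request at frame 0x1234
	 * makes cdrom_start_write_cont() below build the WRITE_10 packet
	 *
	 *	c[0]    = 0x2a			GPCMD_WRITE_10
	 *	c[2..5] = 00 00 12 34		big-endian LBA, via cpu_to_be32()
	 *	c[7..8] = 00 20			transfer length, 32 frames of 2KB
	 *
	 * with all other bytes left zero by the memset.
	 */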
+	if (dma) {
+		info->dma = 0;
+		if ((dma_error = HWIF(drive)->dmaproc(ide_dma_end, drive))) {
+			printk("ide-cd: write dma error\n");
+			HWIF(drive)->dmaproc(ide_dma_off, drive);
+		}
+	}
+
+	if (cdrom_decode_status(&startstop, drive, 0, &stat)) {
+		printk("ide-cd: write_intr decode_status bad\n");
+		return startstop;
+	}
+
+	if (dma) {
+		if (dma_error)
+			return ide_error(drive, "dma error", stat);
+
+		rq = HWGROUP(drive)->rq;
+		for (i = rq->nr_sectors; i > 0;) {
+			i -= rq->current_nr_sectors;
+			ide_end_request(1, HWGROUP(drive));
+		}
+		return ide_stopped;
+	}
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = IN_BYTE(IDE_NSECTOR_REG);
+	len = IN_BYTE(IDE_LCYL_REG) + 256 * IN_BYTE(IDE_HCYL_REG);
+
+	/* If DRQ is clear, the command has completed. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* If we're not done writing, complain.
+		 * Otherwise, complete the command normally.
+		 */
+		if (rq->current_nr_sectors > 0) {
+			printk("%s: write_intr: data underrun (%ld blocks)\n",
+				drive->name, rq->current_nr_sectors);
+			cdrom_end_request(0, drive);
+		} else
+			cdrom_end_request(1, drive);
+		return ide_stopped;
+	}
+
+	/* Check that the drive is expecting to do the same thing we are. */
+	if (ireason & 3)
+		if (cdrom_write_check_ireason(drive, len, ireason))
+			return ide_stopped;
+
+	/* The number of sectors we need to write to the drive. */
+	sectors_to_transfer = len / SECTOR_SIZE;
+
+	/* Now loop while we still have data to write to the drive. DMA
+	 * transfers will already have been completed.
+	 */
+	while (sectors_to_transfer > 0) {
+		int this_transfer;
+
+		/* If we've filled the present buffer but there's another
+		   chained buffer after it, move on. */
+		if (rq->current_nr_sectors == 0 && rq->nr_sectors > 0)
+			cdrom_end_request(1, drive);
+
+		/* Ship out the current buffer, then update the request
+		   bookkeeping before zeroing current_nr_sectors. */
+		this_transfer = rq->current_nr_sectors;
+		atapi_output_bytes(drive, rq->buffer,
+				   this_transfer * SECTOR_SIZE);
+		rq->nr_sectors -= this_transfer;
+		rq->sector += this_transfer;
+		rq->current_nr_sectors = 0;
+		sectors_to_transfer -= this_transfer;
+	}
+
+	/* arm handler */
+	ide_set_handler(drive, &cdrom_write_intr, 5 * WAIT_CMD, NULL);
+	return ide_started;
+}
+
+static ide_startstop_t cdrom_start_write_cont(ide_drive_t *drive)
+{
+	struct packet_command pc;
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned nframes, frame;
+
+	nframes = rq->nr_sectors >> 2;
+	frame = rq->sector >> 2;
+
+#define IDECD_WRITE_PARANOIA
+#ifdef IDECD_WRITE_PARANOIA
+	if (nframes != 32 || frame % 32) {
+		printk("ide-cd: got WRONG write! %u %u\n", frame, nframes);
+		ide_end_request(0, HWGROUP(drive));
+		return ide_stopped;
+	}
+#endif
+
+	memset(&pc.c, 0, sizeof(pc.c));
+	/*
+	 * we might as well use WRITE_12, but none of the devices I have
+	 * support the streaming feature anyway, so who cares.
+	 */
+	pc.c[0] = GPCMD_WRITE_10;
+#if 0
+	pc.c[1] = 1 << 3;	/* FUA bit */
+#endif
+	pc.c[7] = (nframes >> 8) & 0xff;
+	pc.c[8] = nframes & 0xff;
+	put_unaligned(cpu_to_be32(frame), (unsigned int *)&pc.c[2]);
+
+	return cdrom_transfer_packet_command(drive, pc.c, sizeof(pc.c),
+					     cdrom_write_intr, 2 * WAIT_CMD);
+}
+
+static ide_startstop_t cdrom_start_write(ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+
+	info->nsectors_buffered = 0;
+
+	/* use dma, if possible. we don't need to check more, since we
+	 * know that the transfer is always (at least!) 2KB aligned */
+	info->dma = drive->using_dma ? 1 : 0;
+
+	/* Start sending the write request to the drive.
*/ + return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont, WRITE); +} + /**************************************************************************** * cdrom driver request routine. */ static ide_startstop_t -ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, unsigned long block) +ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, unsigned long block) { ide_startstop_t action; struct cdrom_info *info = drive->driver_data; switch (rq->cmd) { + case WRITE_PACKET: case READ: { if (CDROM_CONFIG_FLAGS(drive)->seeking) { unsigned long elpased = jiffies - info->start_seek; @@ -1413,8 +1613,12 @@ } if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) action = cdrom_start_seek (drive, block); - else - action = cdrom_start_read (drive, block); + else { + if (rq->cmd == READ) + action = cdrom_start_read(drive, block); + else + action = cdrom_start_write(drive); + } info->last_block = block; return action; } @@ -1428,9 +1632,8 @@ cdrom_end_request(1, drive); return ide_do_reset(drive); } - default: { - printk("ide-cd: bad cmd %d\n", rq -> cmd); + printk("ide-cd: bad cmd %d\n", rq->cmd); cdrom_end_request(0, drive); return ide_stopped; } @@ -1777,6 +1980,9 @@ HWIF(drive)->gd->sizes[drive->select.b.unit << PARTN_BITS] = (toc->capacity * SECTORS_PER_FRAME) >> (BLOCK_SIZE_BITS - 9); drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME; + HWIF(drive)->gd->sizes[drive->select.b.unit << PARTN_BITS] = (toc->capacity * SECTORS_PER_FRAME) >> (BLOCK_SIZE_BITS - 9); + drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME; + /* Remember that we've read this stuff. */ CDROM_STATE_FLAGS (drive)->toc_valid = 1; @@ -1822,8 +2028,9 @@ pc.c[2] = (speed >> 8) & 0xff; /* Read Drive speed in kbytes/second LSB */ pc.c[3] = speed & 0xff; - if ( CDROM_CONFIG_FLAGS(drive)->cd_r || - CDROM_CONFIG_FLAGS(drive)->cd_rw ) { + if (CDROM_CONFIG_FLAGS(drive)->cd_r || + CDROM_CONFIG_FLAGS(drive)->cd_rw || + CDROM_CONFIG_FLAGS(drive)->dvd_r) { /* Write Drive speed in kbytes/second MSB */ pc.c[4] = (speed >> 8) & 0xff; /* Write Drive speed in kbytes/second LSB */ @@ -1875,10 +2082,6 @@ return 0; } - - - - /* the generic packet interface to cdrom.c */ static int ide_cdrom_packet(struct cdrom_device_info *cdi, struct cdrom_generic_command *cgc) @@ -1886,21 +2089,20 @@ struct packet_command pc; ide_drive_t *drive = (ide_drive_t*) cdi->handle; + if (cgc->timeout <= 0) + return -EINVAL; + /* here we queue the commands from the uniform CD-ROM layer. the packet must be complete, as we do not touch it at all. */ memset(&pc, 0, sizeof(pc)); - memcpy(pc.c, cgc->cmd, CDROM_PACKET_SIZE); + memcpy(pc.c, cgc->cmd, CDROM_CDB_SIZE); pc.buffer = cgc->buffer; pc.buflen = cgc->buflen; - cgc->stat = cdrom_queue_packet_command(drive, &pc); - - /* - * FIXME: copy sense, don't just assign pointer!! - */ - cgc->sense = pc.sense; - - return cgc->stat; + pc.quiet = cgc->quiet; + pc.timeout = cgc->timeout; + pc.sense = cgc->sense; + return cgc->stat = cdrom_queue_packet_command(drive, &pc); } static @@ -2192,6 +2394,12 @@ static void ide_cdrom_release_real (struct cdrom_device_info *cdi) { + struct cdrom_generic_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; + cgc.quiet = 1; + (void) ide_cdrom_packet(cdi, &cgc); } @@ -2371,15 +2579,11 @@ printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed); printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? 
"DVD-ROM" : "CD-ROM"); - if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram) - printk (" DVD%s%s", - (CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "", - (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : ""); - - if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw) - printk (" CD%s%s", - (CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "", - (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : ""); + if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram) + printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : ""); + + if (CDROM_CONFIG_FLAGS(drive)->cd_r ||CDROM_CONFIG_FLAGS(drive)->cd_rw) + printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : ""); if (CDROM_CONFIG_FLAGS (drive)->is_changer) printk (" changer w/%d slots", nslots); @@ -2402,7 +2606,7 @@ int major = HWIF(drive)->major; int minor = drive->select.b.unit << PARTN_BITS; - ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL); + ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, TYPE_INT, 0, 255, 1, 1024, &read_ahead[major], NULL); ide_add_setting(drive, "file_readahead", SETTING_RW, BLKFRAGET, BLKFRASET, TYPE_INTA, 0, INT_MAX, 1, 1024, &max_readahead[major][minor], NULL); ide_add_setting(drive, "max_kb_per_request", SETTING_RW, BLKSECTGET, BLKSECTSET, TYPE_INTA, 1, 255, 1, 2, &max_sectors[major][minor], NULL); ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); @@ -2416,7 +2620,7 @@ int minor = drive->select.b.unit << PARTN_BITS; int nslots; - set_device_ro(MKDEV(HWIF(drive)->major, minor), 1); + set_device_ro(MKDEV(HWIF(drive)->major, minor), 0); set_blocksize(MKDEV(HWIF(drive)->major, minor), CD_FRAMESIZE); drive->special.all = 0; @@ -2623,7 +2827,7 @@ struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *devinfo = &info->devinfo; - if (ide_unregister_subdriver (drive)) + if (ide_unregister_subdriver(drive)) return 1; if (info->buffer != NULL) kfree(info->buffer); @@ -2631,7 +2835,7 @@ kfree(info->toc); if (info->changer_info != NULL) kfree(info->changer_info); - if (devinfo->handle == drive && unregister_cdrom (devinfo)) + if (devinfo->handle == drive && unregister_cdrom(devinfo)) printk ("%s: ide_cdrom_cleanup failed to unregister device from the cdrom driver.\n", drive->name); kfree(info); drive->driver_data = NULL; @@ -2710,7 +2914,7 @@ kfree (info); continue; } - memset (info, 0, sizeof (struct cdrom_info)); + memset(info, 0, sizeof (struct cdrom_info)); drive->driver_data = info; DRIVER(drive)->busy++; if (ide_cdrom_setup (drive)) { diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-cd.h linux/drivers/ide/ide-cd.h --- /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-cd.h Mon Oct 2 11:02:48 2000 +++ linux/drivers/ide/ide-cd.h Tue Oct 31 07:53:07 2000 @@ -106,6 +106,7 @@ int buflen; int stat; int quiet; + int timeout; struct request_sense *sense; unsigned char c[12]; }; @@ -628,7 +629,9 @@ "Logical unit not ready - in progress [sic] of becoming ready" }, { 0x020402, "Logical unit not ready - initializing command required" }, { 0x020403, "Logical unit not ready - manual intervention required" }, - { 0x020404, "In process of becoming ready - writing" }, + { 0x020404, "Logical unit not ready - format in progress" }, + { 0x020407, "Logical unit not ready - operation in progress" }, + { 0x020408, "Logical unit not ready - long write in progress" }, { 0x020600, "No 
reference position found (media may be upside down)" }, { 0x023000, "Incompatible medium installed" }, { 0x023a00, "Medium not present" }, @@ -678,7 +681,6 @@ { 0x04b600, "Media load mechanism failed" }, { 0x051a00, "Parameter list length error" }, { 0x052000, "Invalid command operation code" }, - { 0x052c00, "Command sequence error" }, { 0x052100, "Logical block address out of range" }, { 0x052102, "Invalid address for write" }, { 0x052400, "Invalid field in command packet" }, diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-dma.c linux/drivers/ide/ide-dma.c --- /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-dma.c Thu Jul 27 16:40:57 2000 +++ linux/drivers/ide/ide-dma.c Mon Oct 23 01:31:44 2000 @@ -224,6 +224,9 @@ unsigned char *virt_addr = bh->b_data; unsigned int size = bh->b_size; + if (nents >= PRD_ENTRIES) + return 0; + while ((bh = bh->b_reqnext) != NULL) { if ((virt_addr + size) != (unsigned char *) bh->b_data) break; @@ -257,6 +260,9 @@ HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq); + if (!i) + return 0; + sg = HWIF(drive)->sg_table; while (i && sg_dma_len(sg)) { u32 cur_addr; @@ -266,7 +272,7 @@ cur_len = sg_dma_len(sg); while (cur_len) { - if (++count >= PRD_ENTRIES) { + if (count++ >= PRD_ENTRIES) { printk("%s: DMA table too small\n", drive->name); pci_unmap_sg(HWIF(drive)->pci_dev, HWIF(drive)->sg_table, diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-probe.c linux/drivers/ide/ide-probe.c --- /opt/kernel/linux-2.4.0-test10/drivers/ide/ide-probe.c Thu Aug 3 16:29:49 2000 +++ linux/drivers/ide/ide-probe.c Mon Oct 23 01:31:44 2000 @@ -761,9 +761,10 @@ for (unit = 0; unit < minors; ++unit) { *bs++ = BLOCK_SIZE; #ifdef CONFIG_BLK_DEV_PDC4030 - *max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : MAX_SECTORS); + *max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : 256); #else - *max_sect++ = MAX_SECTORS; + /* IDE can do up to 128K per request. */ + *max_sect++ = 256; #endif *max_ra++ = MAX_READAHEAD; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c --- /opt/kernel/linux-2.4.0-test10/drivers/scsi/scsi_lib.c Sun Sep 17 10:09:29 2000 +++ linux/drivers/scsi/scsi_lib.c Tue Oct 31 07:47:15 2000 @@ -87,7 +87,6 @@ SCpnt->request.cmd = SPECIAL; SCpnt->request.special = (void *) SCpnt; SCpnt->request.q = NULL; - SCpnt->request.free_list = NULL; SCpnt->request.nr_segments = 0; /* @@ -420,6 +419,7 @@ bh->b_reqnext = NULL; sectors -= bh->b_size >> 9; bh->b_end_io(bh, uptodate); + bh->b_queue = NULL; if ((bh = req->bh) != NULL) { req->current_nr_sectors = bh->b_size >> 9; if (req->nr_sectors < req->current_nr_sectors) { @@ -462,6 +462,9 @@ } add_blkdev_randomness(MAJOR(req->rq_dev)); + if (req->end_io) + req->end_io(req); + SDpnt = SCpnt->device; /* @@ -718,6 +721,11 @@ switch (SCpnt->sense_buffer[2]) { case ILLEGAL_REQUEST: + /* don't requeue a failed WRITE_PACKET request */ + if (SCpnt->request.cmd == WRITE_PACKET) { + SCpnt = scsi_end_request(SCpnt, 0, this_count); + return; + } if (SCpnt->device->ten) { SCpnt->device->ten = 0; /* @@ -870,7 +878,7 @@ * if the device itself is blocked, or if the host is fully * occupied. */ - if (SHpnt->in_recovery || q->plugged) + if (SHpnt->in_recovery) return; /* @@ -883,7 +891,7 @@ * released the lock and grabbed it again, so each time * we need to check to see if the queue is plugged or not. 
*/ - if (SHpnt->in_recovery || q->plugged) + if (SHpnt->in_recovery) return; /* @@ -896,10 +904,11 @@ || (SHpnt->host_blocked) || (SHpnt->host_self_blocked)) { /* - * If we are unable to process any commands at all for this - * device, then we consider it to be starved. What this means - * is that there are no outstanding commands for this device - * and hence we need a little help getting it started again + * If we are unable to process any commands at all for + * this device, then we consider it to be starved. + * What this means is that there are no outstanding + * commands for this device and hence we need a + * little help getting it started again * once the host isn't quite so busy. */ if (SDpnt->device_busy == 0) { @@ -1000,8 +1009,8 @@ } /* * If so, we are ready to do something. Bump the count - * while the queue is locked and then break out of the loop. - * Otherwise loop around and try another request. + * while the queue is locked and then break out of the + * loop. Otherwise loop around and try another request. */ if (!SCpnt) { break; @@ -1029,8 +1038,9 @@ memcpy(&SCpnt->request, req, sizeof(struct request)); /* - * We have copied the data out of the request block - it is now in - * a field in SCpnt. Release the request block. + * We have copied the data out of the request block - + * it is now in a field in SCpnt. Release the request + * block. */ blkdev_release_request(req); } @@ -1047,12 +1057,14 @@ /* * This will do a couple of things: * 1) Fill in the actual SCSI command. - * 2) Fill in any other upper-level specific fields (timeout). + * 2) Fill in any other upper-level specific fields + * (timeout). * - * If this returns 0, it means that the request failed (reading - * past end of disk, reading offline device, etc). This won't - * actually talk to the device, but some kinds of consistency - * checking may cause the request to be rejected immediately. + * If this returns 0, it means that the request failed + * (reading past end of disk, reading offline device, + * etc). This won't actually talk to the device, but + * some kinds of consistency checking may cause the + * request to be rejected immediately. */ if (STpnt == NULL) { STpnt = scsi_get_request_dev(req); @@ -1103,8 +1115,8 @@ scsi_dispatch_cmd(SCpnt); /* - * Now we need to grab the lock again. We are about to mess with - * the request queue and try to find another command. + * Now we need to grab the lock again. We are about to mess + * with the request queue and try to find another command. */ spin_lock_irq(&io_request_lock); } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c --- /opt/kernel/linux-2.4.0-test10/drivers/scsi/scsi_merge.c Mon Oct 30 08:41:33 2000 +++ linux/drivers/scsi/scsi_merge.c Mon Oct 23 01:31:44 2000 @@ -324,7 +324,6 @@ req->nr_segments >= SHpnt->sg_tablesize) return 0; req->nr_segments++; - q->elevator.nr_segments++; return 1; } @@ -341,11 +340,8 @@ if (req->nr_hw_segments >= SHpnt->sg_tablesize || req->nr_segments >= SHpnt->sg_tablesize) return 0; - if (req->nr_segments >= max_segments) - return 0; req->nr_hw_segments++; req->nr_segments++; - q->elevator.nr_segments++; return 1; } #else @@ -361,7 +357,6 @@ * counter. 
*/ req->nr_segments++; - q->elevator.nr_segments++; return 1; } else { return 0; @@ -417,8 +412,10 @@ SDpnt = (Scsi_Device *) q->queuedata; SHpnt = SDpnt->host; +#ifdef DMA_CHUNK_SIZE if (max_segments > 64) max_segments = 64; +#endif if (use_clustering) { /* @@ -471,8 +468,10 @@ SDpnt = (Scsi_Device *) q->queuedata; SHpnt = SDpnt->host; +#ifdef DMA_CHUNK_SIZE if (max_segments > 64) max_segments = 64; +#endif if (use_clustering) { /* @@ -601,10 +600,10 @@ SDpnt = (Scsi_Device *) q->queuedata; SHpnt = SDpnt->host; +#ifdef DMA_CHUNK_SIZE if (max_segments > 64) max_segments = 64; -#ifdef DMA_CHUNK_SIZE /* If it would not fit into prepared memory space for sg chain, * then don't allow the merge. */ @@ -664,7 +663,6 @@ * This one is OK. Let it go. */ req->nr_segments += next->nr_segments - 1; - q->elevator.nr_segments--; #ifdef DMA_CHUNK_SIZE req->nr_hw_segments += next->nr_hw_segments - 1; #endif @@ -820,11 +818,7 @@ /* * First we need to know how many scatter gather segments are needed. */ - if (!sg_count_valid) { - count = __count_segments(req, use_clustering, dma_host, NULL); - } else { - count = req->nr_segments; - } + count = __count_segments(req, use_clustering, dma_host, NULL); /* * If the dma pool is nearly empty, then queue a minimal request diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/scsi/sr.c linux/drivers/scsi/sr.c --- /opt/kernel/linux-2.4.0-test10/drivers/scsi/sr.c Tue Oct 31 06:12:38 2000 +++ linux/drivers/scsi/sr.c Tue Oct 31 06:13:41 2000 @@ -28,12 +28,16 @@ * Modified by Jens Axboe - support DVD-RAM * transparently and loose the GHOST hack * + * Modified by Jens Axboe - support packet writing + * through generic packet layer. + * * Modified by Arnaldo Carvalho de Melo * check resource allocation in sr_init and some cleanups * */ #include +#include #include #include @@ -83,7 +87,7 @@ finish:sr_finish, attach:sr_attach, detach:sr_detach, - init_command:sr_init_command + init_command:sr_init_command, }; Scsi_CD *scsi_CDs; @@ -190,8 +194,9 @@ } /* - * rw_intr is the interrupt routine for the device driver. It will be notified on the - * end of a SCSI read / write, and will take on of several actions based on success or failure. + * rw_intr is the interrupt routine for the device driver. It will be notified + * on the end of a SCSI read / write, and will take on of several actions + * based on success or failure. */ static void rw_intr(Scsi_Cmnd * SCpnt) @@ -201,13 +206,11 @@ int good_sectors = (result == 0 ? this_count : 0); int block_sectors = 0; -#ifdef DEBUG - printk("sr.c done: %x %x\n", result, SCpnt->request.bh->b_data); -#endif /* - Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial success. - Since this is a relatively rare error condition, no care is taken to - avoid unnecessary additional work such as memcpy's that could be avoided. + * Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial + * success. Since this is a relatively rare error condition, no care + * is taken to avoid unnecessary additional work such as memcpy's that + * could be avoided. 
*/ @@ -241,6 +244,7 @@ scsi_CDs[device_nr].capacity - error_sector < 4 * 75) sr_sizes[device_nr] = error_sector >> 1; } + /* * This calls the generic completion function, now that we know * how many actual sectors finished, and how many sectors we need @@ -249,7 +253,6 @@ scsi_io_completion(SCpnt, good_sectors, block_sectors); } - static request_queue_t *sr_find_queue(kdev_t dev) { /* @@ -263,7 +266,8 @@ static int sr_init_command(Scsi_Cmnd * SCpnt) { - int dev, devm, block, this_count; + int dev, devm, this_count; + unsigned long block; devm = MINOR(SCpnt->request.rq_dev); dev = DEVICE_NR(SCpnt->request.rq_dev); @@ -288,8 +292,8 @@ } if (scsi_CDs[dev].device->changed) { /* - * quietly refuse to do anything to a changed disc until the changed - * bit has been reset + * quietly refuse to do anything to a changed disc until + * the changed bit has been reset */ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ return 0; @@ -310,7 +314,7 @@ if (scsi_CDs[dev].device->sector_size == 1024) { if ((block & 1) || (SCpnt->request.nr_sectors & 1)) { - printk("sr.c:Bad 1K block number requested (%d %ld)", + printk("sr.c:Bad 1K block number requested (%lu %ld)", block, SCpnt->request.nr_sectors); return 0; } else { @@ -320,7 +324,7 @@ } if (scsi_CDs[dev].device->sector_size == 2048) { if ((block & 3) || (SCpnt->request.nr_sectors & 3)) { - printk("sr.c:Bad 2K block number requested (%d %ld)", + printk("sr.c:Bad 2K block number requested (%ld %ld)", block, SCpnt->request.nr_sectors); return 0; } else { @@ -329,6 +333,10 @@ } } switch (SCpnt->request.cmd) { + case WRITE_PACKET: + SCpnt->cmnd[0] = WRITE_10; + SCpnt->sc_data_direction = SCSI_DATA_WRITE; + break; case WRITE: SCpnt->cmnd[0] = WRITE_10; SCpnt->sc_data_direction = SCSI_DATA_WRITE; @@ -473,16 +481,17 @@ Scsi_Request *SRpnt; buffer = (unsigned char *) scsi_malloc(512); + SRpnt = scsi_allocate_request(scsi_CDs[i].device); - - if(buffer == NULL || SRpnt == NULL) - { + return; + + if(buffer == NULL || SRpnt == NULL) { scsi_CDs[i].capacity = 0x1fffff; sector_size = 2048; /* A guess, just in case */ scsi_CDs[i].needs_sector_size = 1; - if(buffer) + if (buffer) scsi_free(buffer, 512); - if(SRpnt) + if (SRpnt) scsi_release_request(SRpnt); return; } @@ -492,13 +501,13 @@ cmd[0] = READ_CAPACITY; cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0; memset((void *) &cmd[2], 0, 8); - SRpnt->sr_request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */ + /* Mark as really busy */ + SRpnt->sr_request.rq_status = RQ_SCSI_BUSY; SRpnt->sr_cmd_len = 0; memset(buffer, 0, 8); /* Do the command and wait.. 
*/ - SRpnt->sr_data_direction = SCSI_DATA_READ; scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer, 8, SR_TIMEOUT, MAX_RETRIES); @@ -514,17 +523,17 @@ if (the_result) { scsi_CDs[i].capacity = 0x1fffff; - sector_size = 2048; /* A guess, just in case */ + /* A guess, just in case */ + sector_size = 2048; scsi_CDs[i].needs_sector_size = 1; } else { -#if 0 if (cdrom_get_last_written(MKDEV(MAJOR_NR, i), - (long *) &scsi_CDs[i].capacity)) -#endif + (long *) &scsi_CDs[i].capacity)) { scsi_CDs[i].capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); + } sector_size = (buffer[4] << 24) | (buffer[5] << 16) | (buffer[6] << 8) | buffer[7]; switch (sector_size) { @@ -660,10 +669,8 @@ unsigned char *buffer = cgc->buffer; int buflen; - /* get the device */ - SRpnt = scsi_allocate_request(device); - if (SRpnt == NULL) - return -ENODEV; /* this just doesn't seem right /axboe */ + if ((SRpnt = scsi_allocate_request(device)) == NULL) + return -ENOMEM; /* use buffer for ISA DMA */ buflen = (cgc->buflen + 511) & ~511; @@ -684,12 +691,15 @@ /* scsi_wait_req sets the command length */ SRpnt->sr_cmd_len = 0; + if (cgc->timeout <= 0) + cgc->timeout = 5 * HZ; + SRpnt->sr_data_direction = cgc->data_direction; scsi_wait_req(SRpnt, (void *) cgc->cmd, (void *) buffer, cgc->buflen, - SR_TIMEOUT, MAX_RETRIES); + cgc->timeout, MAX_RETRIES); - if ((cgc->stat = SRpnt->sr_result)) - cgc->sense = (struct request_sense *) SRpnt->sr_sense_buffer; + cgc->stat = SRpnt->sr_result; + cgc->sense = (struct request_sense *) SRpnt->sr_sense_buffer; /* release */ SRpnt->sr_request.rq_dev = MKDEV(0, 0); @@ -702,7 +712,6 @@ scsi_free(buffer, buflen); } - return cgc->stat; } @@ -717,7 +726,7 @@ if (!sr_registered) { if (devfs_register_blkdev(MAJOR_NR, "sr", &cdrom_fops)) { - printk("Unable to get major %d for SCSI-CD\n", MAJOR_NR); + printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR); return 1; } sr_registered++; @@ -769,6 +778,7 @@ { int i; char name[6]; + struct cdrom_device_info *cdi; blk_dev[MAJOR_NR].queue = sr_find_queue; blk_size[MAJOR_NR] = sr_sizes; @@ -779,9 +789,11 @@ if (scsi_CDs[i].capacity) continue; scsi_CDs[i].capacity = 0x1fffff; - scsi_CDs[i].device->sector_size = 2048; /* A guess, just in case */ + /* A guess, just in case */ + scsi_CDs[i].device->sector_size = 2048; scsi_CDs[i].needs_sector_size = 1; - scsi_CDs[i].device->changed = 1; /* force recheck CD type */ + /* force recheck CD type */ + scsi_CDs[i].device->changed = 1; #if 0 /* seems better to leave this for later */ get_sectorsize(i); @@ -795,34 +807,35 @@ scsi_CDs[i].readcd_cdda = 0; sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9); - scsi_CDs[i].cdi.ops = &sr_dops; - scsi_CDs[i].cdi.handle = &scsi_CDs[i]; - scsi_CDs[i].cdi.dev = MKDEV(MAJOR_NR, i); - scsi_CDs[i].cdi.mask = 0; - scsi_CDs[i].cdi.capacity = 1; + cdi = &scsi_CDs[i].cdi; + cdi->ops = &sr_dops; + cdi->handle = &scsi_CDs[i]; + cdi->dev = MKDEV(MAJOR_NR, i); + cdi->mask = 0; + cdi->capacity = 1; get_capabilities(i); sr_vendor_init(i); sprintf(name, "sr%d", i); - strcpy(scsi_CDs[i].cdi.name, name); - scsi_CDs[i].cdi.de = - devfs_register (scsi_CDs[i].device->de, "cd", - DEVFS_FL_DEFAULT, MAJOR_NR, i, - S_IFBLK | S_IRUGO | S_IWUGO, - &cdrom_fops, NULL); - register_cdrom(&scsi_CDs[i].cdi); + strcpy(cdi->name, name); + cdi->de = devfs_register(scsi_CDs[i].device->de, "cd", + DEVFS_FL_DEFAULT, MAJOR_NR, i, + S_IFBLK | S_IRUGO | S_IWUGO, + &cdrom_fops, NULL); + register_cdrom(cdi); } /* If our host adapter is capable of scatter-gather, then we increase * the 
read-ahead to 16 blocks (32 sectors). If not, we use * a two block (4 sector) read ahead. */ - if (scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize) - read_ahead[MAJOR_NR] = 32; /* 32 sector read-ahead. Always removable. */ - else - read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */ - - return; + if (scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize) { + /* 32 sector read-ahead. Always removable. */ + read_ahead[MAJOR_NR] = 32; + } else { + /* 4 sector read-ahead */ + read_ahead[MAJOR_NR] = 4; + } } static void sr_detach(Scsi_Device * SDp) @@ -834,20 +847,23 @@ if (cpnt->device == SDp) { kdev_t devi = MKDEV(MAJOR_NR, i); struct super_block *sb = get_super(devi); + struct cdrom_device_info *cdi; /* - * Since the cdrom is read-only, no need to sync the device. - * We should be kind to our buffer cache, however. + * Since the cdrom is read-only, no need to sync the + * device. We should be kind to our buffer cache, + * however. */ if (sb) invalidate_inodes(sb); invalidate_buffers(devi); /* - * Reset things back to a sane state so that one can re-load a new - * driver (perhaps the same one). + * Reset things back to a sane state so that one can + * re-load a new driver (perhaps the same one). */ - unregister_cdrom(&(cpnt->cdi)); + cdi = &(cpnt->cdi); + unregister_cdrom(cdi); cpnt->device = NULL; cpnt->capacity = 0; SDp->attached--; @@ -856,29 +872,28 @@ sr_sizes[i] = 0; return; } - return; } -static int __init init_sr(void) +static int __init sr_module_init(void) { sr_template.module = THIS_MODULE; return scsi_register_module(MODULE_SCSI_DEV, &sr_template); } -static void __exit exit_sr(void) +static void __exit sr_module_exit(void) { scsi_unregister_module(MODULE_SCSI_DEV, &sr_template); devfs_unregister_blkdev(MAJOR_NR, "sr"); sr_registered--; if (scsi_CDs != NULL) { - kfree(scsi_CDs); + kfree((char *) scsi_CDs); - kfree(sr_sizes); + kfree((char *) sr_sizes); sr_sizes = NULL; - kfree(sr_blocksizes); + kfree((char *) sr_blocksizes); sr_blocksizes = NULL; - kfree(sr_hardsizes); + kfree((char *) sr_hardsizes); sr_hardsizes = NULL; } blksize_size[MAJOR_NR] = NULL; @@ -889,5 +904,5 @@ sr_template.dev_max = 0; } -module_init(init_sr); -module_exit(exit_sr); +module_init(sr_module_init); +module_exit(sr_module_exit); diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/drivers/scsi/sr_ioctl.c linux/drivers/scsi/sr_ioctl.c --- /opt/kernel/linux-2.4.0-test10/drivers/scsi/sr_ioctl.c Tue Sep 5 13:46:15 2000 +++ linux/drivers/scsi/sr_ioctl.c Mon Oct 23 01:31:44 2000 @@ -43,7 +43,8 @@ char *bounce_buffer; SDev = scsi_CDs[target].device; - SRpnt = scsi_allocate_request(scsi_CDs[target].device); + if ((SRpnt = scsi_allocate_request(scsi_CDs[target].device)) == NULL) + return -ENOMEM; SRpnt->sr_data_direction = readwrite; /* use ISA DMA buffer if necessary */ diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/fs/buffer.c linux/fs/buffer.c --- /opt/kernel/linux-2.4.0-test10/fs/buffer.c Mon Oct 30 08:41:33 2000 +++ linux/fs/buffer.c Tue Oct 31 09:24:24 2000 @@ -131,6 +131,11 @@ int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 1*HZ, 1*HZ, 1, 1}; int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,600*HZ, 6000*HZ, 6000*HZ, 2047, 5}; +static void print_buffer_info(struct buffer_head *bh) +{ + printk("bh timed out: sector %lu dev/rdev %s/%s, count %d, state %lx, end_io %p, private %p, list %d\n", bh->b_rsector, kdevname(bh->b_dev), kdevname(bh->b_rdev), atomic_read(&bh->b_count), bh->b_state, bh->b_end_io, bh->b_private, bh->b_list); +} + /* * Rewrote the 
wait-routines to use the "new" wait-queue functionality, * and getting rid of the cli-sti pairs. The wait-queue routines still @@ -144,135 +149,112 @@ { struct task_struct *tsk = current; DECLARE_WAITQUEUE(wait, tsk); + int ret; atomic_inc(&bh->b_count); add_wait_queue(&bh->b_wait, &wait); do { - run_task_queue(&tq_disk); + request_queue_t *q = (request_queue_t *) bh->b_queue; + if (q) + generic_unplug_device(q); + else + run_task_queue(&tq_disk); set_task_state(tsk, TASK_UNINTERRUPTIBLE); if (!buffer_locked(bh)) break; - schedule(); + ret = schedule_timeout(30*HZ); + if (!ret) + print_buffer_info(bh); } while (buffer_locked(bh)); tsk->state = TASK_RUNNING; remove_wait_queue(&bh->b_wait, &wait); atomic_dec(&bh->b_count); } -/* Call sync_buffers with wait!=0 to ensure that the call does not - * return until all buffer writes have completed. Sync() may return - * before the writes have finished; fsync() may not. - */ - -/* Godamity-damn. Some buffers (bitmaps for filesystems) - * spontaneously dirty themselves without ever brelse being called. - * We will ultimately want to put these in a separate list, but for - * now we search all of the lists for dirty buffers. - */ -static int sync_buffers(kdev_t dev, int wait) +static inline int __sync_buffers(kdev_t dev, int wait, int pass, int list) { - int i, retry, pass = 0, err = 0; - struct buffer_head * bh, *next; + struct buffer_head *bh, *next; + int err = 0, i; - /* One pass for no-wait, three for wait: - * 0) write out all dirty, unlocked buffers; - * 1) write out all dirty buffers, waiting if locked; - * 2) wait for completion by waiting for all buffers to unlock. - */ - do { - retry = 0; +restart: + spin_lock(&lru_list_lock); + if ((bh = lru_list[list]) == NULL) + goto out; - /* We search all lists as a failsafe mechanism, not because we expect - * there to be dirty buffers on any of the other lists. - */ -repeat: - spin_lock(&lru_list_lock); - bh = lru_list[BUF_DIRTY]; - if (!bh) - goto repeat2; + for (i = 2 * nr_buffers_type[list]; i-- > 0; bh = next) { - for (i = nr_buffers_type[BUF_DIRTY]*2 ; i-- > 0 ; bh = next) { - next = bh->b_next_free; + next = bh->b_next_free; - if (!lru_list[BUF_DIRTY]) - break; - if (dev && bh->b_dev != dev) - continue; - if (buffer_locked(bh)) { - /* Buffer is locked; skip it unless wait is - * requested AND pass > 0. - */ - if (!wait || !pass) { - retry = 1; - continue; - } - atomic_inc(&bh->b_count); - spin_unlock(&lru_list_lock); - wait_on_buffer (bh); - atomic_dec(&bh->b_count); - goto repeat; - } + if (lru_list[list] == NULL) + goto out; - /* If an unlocked buffer is not uptodate, there has - * been an IO error. Skip it. - */ - if (wait && buffer_req(bh) && !buffer_locked(bh) && - !buffer_dirty(bh) && !buffer_uptodate(bh)) { - err = -EIO; - continue; - } + if (dev && bh->b_dev != dev) + continue; - /* Don't write clean buffers. Don't write ANY buffers - * on the third pass. + if (buffer_locked(bh)) { + /* Buffer is locked; skip it unless wait is + * requested AND pass > 0. */ - if (!buffer_dirty(bh) || pass >= 2) + if (!wait || !pass) continue; atomic_inc(&bh->b_count); spin_unlock(&lru_list_lock); - ll_rw_block(WRITE, 1, &bh); + wait_on_buffer (bh); atomic_dec(&bh->b_count); - retry = 1; - goto repeat; + goto restart; } - repeat2: - bh = lru_list[BUF_LOCKED]; - if (!bh) { - spin_unlock(&lru_list_lock); - break; + /* If an unlocked buffer is not uptodate, there has + * been an IO error. Skip it. 
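/*
 * Editor's note (illustration, not part of the patch): the reworked
 * wait_on_buffer() above does two things -- it unplugs only the queue
 * the buffer is actually on (bh->b_queue) instead of running all of
 * tq_disk, and it bounds each sleep with schedule_timeout(30*HZ) so a
 * buffer that never completes gets reported instead of hanging the
 * task silently. Below, a minimal userspace model of that
 * bounded-wait-and-log pattern using POSIX threads; all names here are
 * made up.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct model_buf {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int             busy;	/* models the BH_Lock bit */
	unsigned long   sector;	/* diagnostic payload */
};

static void model_wait_on_buf(struct model_buf *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->busy) {
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 30;	/* cf. schedule_timeout(30*HZ) */
		if (pthread_cond_timedwait(&b->done, &b->lock, &ts) == ETIMEDOUT)
			fprintf(stderr, "buf timed out: sector %lu\n",
				b->sector);
		/* keep waiting, like the do { } while (buffer_locked(bh)) */
	}
	pthread_mutex_unlock(&b->lock);
}

static void model_end_io(struct model_buf *b)
{
	pthread_mutex_lock(&b->lock);
	b->busy = 0;
	pthread_cond_broadcast(&b->done);	/* cf. wake_up(&bh->b_wait) */
	pthread_mutex_unlock(&b->lock);
}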
+ */ + if (wait && buffer_req(bh) && !buffer_locked(bh) && + !buffer_dirty(bh) && !buffer_uptodate(bh)) { + err = -EIO; + continue; } - for (i = nr_buffers_type[BUF_LOCKED]*2 ; i-- > 0 ; bh = next) { - next = bh->b_next_free; - if (!lru_list[BUF_LOCKED]) - break; - if (dev && bh->b_dev != dev) - continue; - if (buffer_locked(bh)) { - /* Buffer is locked; skip it unless wait is - * requested AND pass > 0. - */ - if (!wait || !pass) { - retry = 1; - continue; - } - atomic_inc(&bh->b_count); - spin_unlock(&lru_list_lock); - wait_on_buffer (bh); - spin_lock(&lru_list_lock); - atomic_dec(&bh->b_count); - goto repeat2; - } - } + /* Don't write clean buffers. Don't write ANY buffers + * on the third pass. + */ + if (!buffer_dirty(bh) || pass >= 2) + continue; + + atomic_inc(&bh->b_count); spin_unlock(&lru_list_lock); + ll_rw_block(WRITE, 1, &bh); + atomic_dec(&bh->b_count); + goto restart; + } + +out: + spin_unlock(&lru_list_lock); + return err; +} + +/* Call sync_buffers with wait!=0 to ensure that the call does not + * return until all buffer writes have completed. Sync() may return + * before the writes have finished; fsync() may not. + */ + +/* Godamity-damn. Some buffers (bitmaps for filesystems) + * spontaneously dirty themselves without ever brelse being called. + * We will ultimately want to put these in a separate list, but for + * now we search all of the lists for dirty buffers. + */ +static int sync_buffers(kdev_t dev, int wait) +{ + int pass = 0, err = 0; + + do { + int _wait = !!pass && wait; + err |= __sync_buffers(dev, _wait, pass, BUF_PACKET); + err |= __sync_buffers(dev, _wait, pass, BUF_DIRTY); + err |= __sync_buffers(dev, _wait, pass, BUF_LOCKED); + if (pass > 1 && current->need_resched) + schedule(); + } while (++pass < 3 && wait); - /* If we are waiting for the sync to succeed, and if any dirty - * blocks were written, then repeat; on the second pass, only - * wait for buffers being written (do not pass to write any - * more buffers on the second pass). - */ - } while (wait && retry && ++pass<=2); return err; } @@ -687,9 +669,9 @@ clear_bit(BH_Uptodate, &bh->b_state); printk(KERN_WARNING "set_blocksize: " - "b_count %d, dev %s, block %lu, from %p\n", + "b_count %d, dev %s, block %lu, from %p, list %d, end_io %p\n", atomic_read(&bh->b_count), bdevname(bh->b_dev), - bh->b_blocknr, __builtin_return_address(0)); + bh->b_blocknr, __builtin_return_address(0), bh->b_list, bh->b_end_io); } write_unlock(&hash_table_lock); if (slept) @@ -729,8 +711,7 @@ static void end_buffer_io_bad(struct buffer_head *bh, int uptodate) { - mark_buffer_uptodate(bh, uptodate); - unlock_buffer(bh); + end_buffer_io_sync(bh, uptodate); BUG(); } @@ -806,12 +787,11 @@ * 14.02.92: changed it to sync dirty buffers a bit: better performance * when the filesystem starts to get full of dirty blocks (I hope). 
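/*
 * Editor's note (illustration, not part of the patch): the rewritten
 * sync_buffers() above keeps the old three-pass contract -- pass 0
 * writes dirty unlocked buffers, pass 1 writes and waits, pass 2 only
 * waits -- but factors the list walk into __sync_buffers() and runs it
 * over three lists, flushing the new BUF_PACKET list first. A
 * standalone model of just that driver loop; flush_list() is a
 * stand-in for __sync_buffers() and the enum values are arbitrary
 * (fs.h defines the real ones).
 */
#include <stdio.h>

enum { BUF_PACKET, BUF_DIRTY, BUF_LOCKED };

static int flush_list(int wait, int pass, int list)
{
	printf("pass %d, list %d, wait=%d\n", pass, list, wait);
	return 0;	/* would return -EIO for a failed buffer */
}

static int model_sync(int wait)
{
	int pass = 0, err = 0;

	do {
		int w = pass && wait;	/* pass 0 never blocks */

		err |= flush_list(w, pass, BUF_PACKET);
		err |= flush_list(w, pass, BUF_DIRTY);
		err |= flush_list(w, pass, BUF_LOCKED);
	} while (++pass < 3 && wait);

	return err;
}

int main(void)
{
	return model_sync(1);	/* fsync-style: all three passes */
}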
*/ -struct buffer_head * getblk(kdev_t dev, int block, int size) +inline struct buffer_head *__getblk(kdev_t dev, int block, int size) { struct buffer_head * bh; int isize; -repeat: spin_lock(&lru_list_lock); write_lock(&hash_table_lock); bh = __get_hash_table(dev, block, size); @@ -820,41 +800,40 @@ isize = BUFSIZE_INDEX(size); spin_lock(&free_list[isize].lock); - bh = free_list[isize].list; - if (bh) { - __remove_from_free_list(bh, isize); - atomic_set(&bh->b_count, 1); + if ((bh = free_list[isize].list) == NULL) { + spin_unlock(&free_list[isize].lock); + goto out; } + + __remove_from_free_list(bh, isize); + atomic_set(&bh->b_count, 1); spin_unlock(&free_list[isize].lock); - /* - * OK, FINALLY we know that this buffer is the only one of - * its kind, we hold a reference (b_count>0), it is unlocked, - * and it is clean. - */ - if (bh) { - init_buffer(bh, end_buffer_io_sync, NULL); - bh->b_dev = dev; - bh->b_blocknr = block; - bh->b_state = 1 << BH_Mapped; + init_buffer(bh, end_buffer_io_sync, NULL); + bh->b_dev = dev; + bh->b_blocknr = block; + bh->b_state = 1 << BH_Mapped; - /* Insert the buffer into the regular lists */ - __insert_into_queues(bh); - out: - write_unlock(&hash_table_lock); - spin_unlock(&lru_list_lock); - touch_buffer(bh); - return bh; - } + /* Insert the buffer into the regular lists */ + __insert_into_queues(bh); - /* - * If we block while refilling the free list, somebody may - * create the buffer first ... search the hashes again. - */ +out: write_unlock(&hash_table_lock); spin_unlock(&lru_list_lock); refill_freelist(size); - goto repeat; + if (bh) + touch_buffer(bh); + return bh; +} + +struct buffer_head * getblk(kdev_t dev, int block, int size) +{ + struct buffer_head *bh; + + while ((bh = __getblk(dev, block, size)) == NULL) + ; + + return bh; } /* -1 -> no need to flush @@ -941,6 +920,10 @@ dispose = BUF_DIRTY; if (buffer_protected(bh)) dispose = BUF_PROTECTED; + if (MAJOR(bh->b_dev) == PACKET_MAJOR && dispose == BUF_DIRTY) + dispose = BUF_PACKET; + if (buffer_dirty(bh) && dispose != BUF_PACKET && MAJOR(bh->b_rdev) == PACKET_MAJOR) + printk("__refile_buffer: rdev is packet, should move?\n"); if (dispose != bh->b_list) { __remove_from_lru_list(bh, bh->b_list); bh->b_list = dispose; @@ -964,7 +947,7 @@ atomic_dec(&buf->b_count); return; } - printk("VFS: brelse: Trying to free free buffer\n"); + printk("VFS: brelse: Trying to free free buffer %lu\n", buf->b_blocknr); } /* @@ -2355,18 +2338,15 @@ void show_buffers(void) { -#ifdef CONFIG_SMP struct buffer_head * bh; int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0; int protected = 0; int nlist; - static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", "PROTECTED", }; -#endif + static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY", "PROTECTED", "PACKET", }; printk("Buffer memory: %6dkB\n", atomic_read(&buffermem_pages) << (PAGE_SHIFT-10)); -#ifdef CONFIG_SMP /* trylock does nothing on UP and so we could deadlock */ if (!spin_trylock(&lru_list_lock)) return; for(nlist = 0; nlist < NR_LIST; nlist++) { @@ -2398,7 +2378,6 @@ used, lastused, locked, protected, dirty); } spin_unlock(&lru_list_lock); -#endif } /* ===================== Init ======================= */ @@ -2503,27 +2482,32 @@ NOTENOTENOTENOTE: we _only_ need to browse the DIRTY lru list as all dirty buffers lives _only_ in the DIRTY lru list. As we never browse the LOCKED and CLEAN lru lists they are infact - completly useless. */ + completly useless. 
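/*
 * Editor's note (illustration, not part of the patch): __refile_buffer()
 * above decides which LRU list a buffer belongs on, and the patch adds
 * one rule at the end -- a dirty buffer belonging to the packet device
 * moves to the new BUF_PACKET list instead of BUF_DIRTY, so bdflush and
 * sync can treat packet writes separately. Here is that decision
 * condensed into a standalone function (the real code also relinks the
 * buffer under lru_list_lock); PACKET_MAJOR 97 matches the major.h hunk
 * further down, and the enum values match fs.h.
 */
#define PACKET_MAJOR	97

enum { BUF_CLEAN, BUF_LOCKED, BUF_DIRTY, BUF_PROTECTED, BUF_PACKET };

static int pick_list(int locked, int dirty, int prot, int major)
{
	int list = BUF_CLEAN;

	if (locked)
		list = BUF_LOCKED;
	if (dirty)
		list = BUF_DIRTY;
	if (prot)
		list = BUF_PROTECTED;
	if (major == PACKET_MAJOR && list == BUF_DIRTY)
		list = BUF_PACKET;	/* the new rule */
	return list;
}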
+ + First flush "regular" buffers, then move to the packet list */ static int flush_dirty_buffers(int check_flushtime) { struct buffer_head * bh, *next; - int flushed = 0, i; + int flushed = 0, i, list = BUF_DIRTY; restart: spin_lock(&lru_list_lock); - bh = lru_list[BUF_DIRTY]; + bh = lru_list[list]; if (!bh) goto out_unlock; - for (i = nr_buffers_type[BUF_DIRTY]; i-- > 0; bh = next) { + for (i = nr_buffers_type[list]; i-- > 0; bh = next) { next = bh->b_next_free; if (!buffer_dirty(bh)) { __refile_buffer(bh); continue; } - if (buffer_locked(bh)) + if (buffer_locked(bh) || MAJOR(bh->b_dev) == PACKET_MAJOR) continue; + if (list == BUF_PACKET) + goto write; + if (check_flushtime) { /* The dirty lru list is chronologically ordered so if the current bh is not yet timed out, @@ -2537,18 +2521,19 @@ } /* OK, now we are committed to write it out. */ +write: atomic_inc(&bh->b_count); spin_unlock(&lru_list_lock); ll_rw_block(WRITE, 1, &bh); atomic_dec(&bh->b_count); - - if (current->need_resched) - schedule(); goto restart; } out_unlock: spin_unlock(&lru_list_lock); - + if (list == BUF_DIRTY) { + list = BUF_PACKET; + goto restart; + } return flushed; } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/blk.h linux/include/linux/blk.h --- /opt/kernel/linux-2.4.0-test10/include/linux/blk.h Mon Oct 2 11:02:34 2000 +++ linux/include/linux/blk.h Tue Oct 31 07:47:19 2000 @@ -47,6 +47,7 @@ extern int bpcd_init(void); extern int ps2esdi_init(void); extern int jsfd_init(void); +extern int pkt_init(void); #if defined(CONFIG_ARCH_S390) extern int mdisk_init(void); @@ -87,10 +88,6 @@ static inline void blkdev_dequeue_request(struct request * req) { - if (req->e) { - req->e->dequeue_fn(req); - req->e = NULL; - } list_del(&req->queue); } diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/blkdev.h linux/include/linux/blkdev.h --- /opt/kernel/linux-2.4.0-test10/include/linux/blkdev.h Mon Oct 2 11:01:39 2000 +++ linux/include/linux/blkdev.h Tue Oct 31 07:47:19 2000 @@ -12,6 +12,7 @@ struct elevator_s; typedef struct elevator_s elevator_t; + /* * Ok, this is an expanded form so that we can use the same * request for paging requests when that is implemented. 
In @@ -23,7 +24,7 @@ int elevator_sequence; struct list_head table; - struct list_head *free_list; + int free_list; volatile int rq_status; /* should split this into a few status bits */ #define RQ_INACTIVE (-1) @@ -47,9 +48,12 @@ struct buffer_head * bh; struct buffer_head * bhtail; request_queue_t *q; - elevator_t *e; + + void (*end_io) (struct request *); }; +typedef void (*rq_end_io) (struct request *); + #include typedef int (merge_request_fn) (request_queue_t *q, @@ -87,6 +91,7 @@ request_fn_proc * request_fn; merge_request_fn * back_merge_fn; merge_request_fn * front_merge_fn; + merge_request_fn * hole_merge_fn; merge_requests_fn * merge_requests_fn; make_request_fn * make_request_fn; plug_device_fn * plug_device_fn; @@ -122,6 +127,8 @@ * Tasks wait here for free request */ wait_queue_head_t wait_for_request; + + int total_requests; }; struct blk_dev_struct { @@ -152,6 +159,7 @@ extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size); extern void generic_make_request(int rw, struct buffer_head * bh); extern request_queue_t *blk_get_queue(kdev_t dev); +extern inline request_queue_t *__blk_get_queue(kdev_t dev); extern void blkdev_release_request(struct request *); /* @@ -162,6 +170,7 @@ extern void blk_queue_headactive(request_queue_t *, int); extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *); extern void blk_queue_make_request(request_queue_t *, make_request_fn *); +extern void generic_unplug_device(void *); extern int * blk_size[MAX_BLKDEV]; @@ -175,9 +184,8 @@ extern int * max_segments[MAX_BLKDEV]; -#define MAX_SECTORS 254 - -#define MAX_SEGMENTS MAX_SECTORS +#define MAX_SEGMENTS 128 +#define MAX_SECTORS (MAX_SEGMENTS*8) #define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK) @@ -191,8 +199,7 @@ #define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next) #define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev) -extern void drive_stat_acct (kdev_t dev, int rw, - unsigned long nr_sectors, int new_io); +extern void drive_stat_acct (kdev_t dev, int rw, unsigned long nr_sectors, int new_io); static inline int get_hardsect_size(kdev_t dev) { diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/cdrom.h linux/include/linux/cdrom.h --- /opt/kernel/linux-2.4.0-test10/include/linux/cdrom.h Mon Oct 2 11:01:39 2000 +++ linux/include/linux/cdrom.h Tue Oct 31 07:47:26 2000 @@ -264,25 +264,13 @@ unsigned short len; }; -#define CDROM_PACKET_SIZE 12 +#define CDROM_CDB_SIZE 12 #define CGC_DATA_UNKNOWN 0 #define CGC_DATA_WRITE 1 #define CGC_DATA_READ 2 #define CGC_DATA_NONE 3 -/* for CDROM_PACKET_COMMAND ioctl */ -struct cdrom_generic_command -{ - unsigned char cmd[CDROM_PACKET_SIZE]; - unsigned char *buffer; - unsigned int buflen; - int stat; - struct request_sense *sense; - unsigned char data_direction; - void *reserved[3]; -}; - /* * A CD-ROM physical sector size is 2048, 2052, 2056, 2324, 2332, 2336, @@ -487,6 +475,7 @@ /* Mode page codes for mode sense/set */ #define GPMODE_R_W_ERROR_PAGE 0x01 #define GPMODE_WRITE_PARMS_PAGE 0x05 +#define GPMODE_WCACHING_PAGE 0x08 #define GPMODE_AUDIO_CTL_PAGE 0x0e #define GPMODE_POWER_PAGE 0x1a #define GPMODE_FAULT_FAIL_PAGE 0x1c @@ -497,7 +486,10 @@ * of MODE_SENSE_POWER_PAGE */ #define GPMODE_CDROM_PAGE 0x0d - +#define GPMODE_PAGE_CURRENT 0 +#define GPMODE_PAGE_CHANGE 1 +#define GPMODE_PAGE_DEFAULT 2 +#define GPMODE_PAGE_SAVE 3 /* DVD struct types */ #define DVD_STRUCT_PHYSICAL 0x00 @@ -675,7 
+667,8 @@ struct dvd_lu_send_rpcstate lrpcs; } dvd_authinfo; -struct request_sense { +struct request_sense +{ #if defined(__BIG_ENDIAN_BITFIELD) __u8 valid : 1; __u8 error_code : 7; @@ -703,6 +696,20 @@ __u8 fruc; __u8 sks[3]; __u8 asb[46]; +}; + +/* for CDROM_PACKET_COMMAND ioctl */ +struct cdrom_generic_command +{ + unsigned char cmd[CDROM_CDB_SIZE]; + unsigned char *buffer; + unsigned int buflen; + int stat; + struct request_sense *sense; + unsigned char data_direction; + int quiet; + int timeout; + void *reserved[1]; }; #ifdef __KERNEL__ diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/elevator.h linux/include/linux/elevator.h --- /opt/kernel/linux-2.4.0-test10/include/linux/elevator.h Tue Jul 18 21:43:10 2000 +++ linux/include/linux/elevator.h Fri Oct 27 19:05:03 2000 @@ -7,34 +7,35 @@ struct list_head *, struct list_head *, int); -typedef int (elevator_merge_fn) (request_queue_t *, struct request **, - struct buffer_head *, int, int *, int *); +typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *, + struct buffer_head *, int, int, int); -typedef void (elevator_dequeue_fn) (struct request *); +typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int, int); + +typedef void (elevator_merge_req_fn) (struct request *, struct request *); struct elevator_s { - int sequence; - int read_latency; int write_latency; - int max_bomb_segments; - - unsigned int nr_segments; - int read_pendings; elevator_fn * elevator_fn; elevator_merge_fn *elevator_merge_fn; - elevator_dequeue_fn *dequeue_fn; + elevator_merge_cleanup_fn *elevator_merge_cleanup_fn; + elevator_merge_req_fn *elevator_merge_req_fn; unsigned int queue_ID; }; void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int); -int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); -void elevator_noop_dequeue(struct request *); +int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int); +void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int, int); +void elevator_noop_merge_req(struct request *, struct request *); + void elevator_linus(struct request *, elevator_t *, struct list_head *, struct list_head *, int); -int elevator_linus_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); +int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int); +void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int, int); +void elevator_linus_merge_req(struct request *, struct request *); typedef struct blkelv_ioctl_arg_s { int queue_ID; @@ -57,6 +58,7 @@ #define ELEVATOR_NO_MERGE 0 #define ELEVATOR_FRONT_MERGE 1 #define ELEVATOR_BACK_MERGE 2 +#define ELEVATOR_HOLE_MERGE 3 /* * This is used in the elevator algorithm. 
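/*
 * Editor's sketch (kernel context; illustrative, not from the patch):
 * with quiet and timeout now part of struct cdrom_generic_command, a
 * caller can bound an individual command instead of inheriting the
 * driver-wide default -- the sr driver earlier in this diff falls back
 * to 5*HZ only when cgc->timeout is unset. The helper name is made up;
 * opcode 0x5a is MODE SENSE(10) from the MMC spec.
 */
#include <linux/cdrom.h>
#include <linux/param.h>
#include <linux/string.h>

static int example_mode_sense(struct cdrom_device_info *cdi, int page,
			      unsigned char *buf, int len)
{
	struct cdrom_generic_command cgc;

	memset(&cgc, 0, sizeof(cgc));
	cgc.cmd[0] = 0x5a;		/* MODE SENSE(10) */
	cgc.cmd[2] = page & 0x3f;	/* current values of the page */
	cgc.cmd[7] = (len >> 8) & 0xff;	/* allocation length, MSB */
	cgc.cmd[8] = len & 0xff;	/* allocation length, LSB */
	cgc.buffer = buf;
	cgc.buflen = len;
	cgc.data_direction = CGC_DATA_READ;
	cgc.quiet = 1;			/* suppress error logging */
	cgc.timeout = 10 * HZ;		/* per-command bound */

	return cdi->ops->generic_packet(cdi, &cgc);
}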
We don't prioritise reads @@ -80,36 +82,26 @@ return latency; } -#define ELEVATOR_NOOP \ -((elevator_t) { \ - 0, /* sequence */ \ - \ - 0, /* read_latency */ \ - 0, /* write_latency */ \ - 0, /* max_bomb_segments */ \ - \ - 0, /* nr_segments */ \ - 0, /* read_pendings */ \ - \ - elevator_noop, /* elevator_fn */ \ - elevator_noop_merge, /* elevator_merge_fn */ \ - elevator_noop_dequeue, /* dequeue_fn */ \ +#define ELEVATOR_NOOP \ +((elevator_t) { \ + 0, /* read_latency */ \ + 0, /* write_latency */ \ + \ + elevator_noop, /* elevator_fn */ \ + elevator_noop_merge, /* elevator_merge_fn */ \ + elevator_noop_merge_cleanup, /* elevator_merge_cleanup_fn */ \ + elevator_noop_merge_req, /* elevator_merge_req_fn */ \ }) -#define ELEVATOR_LINUS \ -((elevator_t) { \ - 0, /* not used */ \ - \ - 1000000, /* read passovers */ \ - 2000000, /* write passovers */ \ - 0, /* max_bomb_segments */ \ - \ - 0, /* not used */ \ - 0, /* not used */ \ - \ - elevator_linus, /* elevator_fn */ \ - elevator_linus_merge, /* elevator_merge_fn */ \ - elevator_noop_dequeue, /* dequeue_fn */ \ +#define ELEVATOR_LINUS \ +((elevator_t) { \ + 512, /* read passovers */ \ + 1024, /* write passovers */ \ + \ + elevator_linus, /* elevator_fn */ \ + elevator_linus_merge, /* elevator_merge_fn */ \ + elevator_linus_merge_cleanup, /* elevator_merge_cleanup_fn */ \ + elevator_linus_merge_req, /* elevator_merge_req_fn */ \ }) #endif diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/fs.h linux/include/linux/fs.h --- /opt/kernel/linux-2.4.0-test10/include/linux/fs.h Mon Oct 30 08:41:35 2000 +++ linux/include/linux/fs.h Tue Oct 31 07:45:37 2000 @@ -71,6 +71,7 @@ #define WRITE 1 #define READA 2 /* read-ahead - don't block if no resources */ #define SPECIAL 4 /* For non-blockdevice requests in request queue */ +#define WRITE_PACKET 5 /* for packet writers */ #define SEL_IN 1 #define SEL_OUT 2 @@ -205,6 +206,7 @@ #define BH_Mapped 4 /* 1 if the buffer has a disk mapping */ #define BH_New 5 /* 1 if the buffer is new and not yet written out */ #define BH_Protected 6 /* 1 if the buffer is protected */ +#define BH_Packet 7 /* 1 if packet writing buffer */ /* * Try to keep the most commonly used fields in single cache lines (16 @@ -243,6 +245,8 @@ unsigned long b_rsector; /* Real buffer location on disk */ wait_queue_head_t b_wait; + + void *b_queue; }; typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); @@ -967,7 +971,8 @@ #define BUF_LOCKED 1 /* Buffers scheduled for write */ #define BUF_DIRTY 2 /* Dirty buffers, not yet scheduled for write */ #define BUF_PROTECTED 3 /* Ramdisk persistent storage */ -#define NR_LIST 4 +#define BUF_PACKET 4 /* packet writing buffers */ +#define NR_LIST 5 /* * This is called by bh->b_end_io() handlers when I/O has completed. 
@@ -1158,6 +1163,7 @@ extern void file_moveto(struct file *new, struct file *old); extern struct buffer_head * get_hash_table(kdev_t, int, int); extern struct buffer_head * getblk(kdev_t, int, int); +extern inline struct buffer_head *__getblk(kdev_t, int, int); extern void ll_rw_block(int, int, struct buffer_head * bh[]); extern int is_read_only(kdev_t); extern void __brelse(struct buffer_head *); diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/major.h linux/include/linux/major.h --- /opt/kernel/linux-2.4.0-test10/include/linux/major.h Tue Oct 3 09:30:18 2000 +++ linux/include/linux/major.h Thu Oct 26 00:41:10 2000 @@ -108,6 +108,8 @@ #define SPECIALIX_NORMAL_MAJOR 75 #define SPECIALIX_CALLOUT_MAJOR 76 +#define PACKET_MAJOR 97 + #define COMPAQ_CISS_MAJOR 104 #define COMPAQ_CISS_MAJOR1 105 #define COMPAQ_CISS_MAJOR2 106 diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h --- /opt/kernel/linux-2.4.0-test10/include/linux/pktcdvd.h Wed Dec 31 16:00:00 1969 +++ linux/include/linux/pktcdvd.h Tue Oct 31 07:53:01 2000 @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2000 Jens Axboe + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and + * DVD-RW devices. + * + */ + +#ifndef PKTCDVD_H +#define PKTCDVD_H + +/* + * 1 for normal debug messages, 2 is very verbose. 0 to turn it off. + */ +#define PACKET_DEBUG 1 + +#define MAX_WRITERS 8 + +/* + * use drive write caching -- we need deferred error handling to be + * able to successfully recover with this option (drive will return good + * status as soon as the cdb is validated). + */ +#define USE_WCACHING 0 + +/* + * No user-serviceable parts beyond this point -> + */ + +#if PACKET_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define DPRINTK(fmt, args...) +#endif + +#if PACKET_DEBUG > 1 +#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define VPRINTK(fmt, args...)
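/*
 * Editor's note (illustration, not part of the patch): the
 * DPRINTK/VPRINTK pair is compiled in or out by level at build time,
 * so a PACKET_DEBUG of 0 costs nothing at runtime. The same scheme in
 * userspace C, using the same GNU named-variadic-macro form the header
 * uses; names here are made up.
 */
#include <stdio.h>

#define DEBUG_LEVEL 1

#if DEBUG_LEVEL
#define DBG(fmt, args...) fprintf(stderr, fmt, ##args)
#else
#define DBG(fmt, args...)
#endif

#if DEBUG_LEVEL > 1
#define VDBG(fmt, args...) fprintf(stderr, fmt, ##args)
#else
#define VDBG(fmt, args...)
#endif

int main(void)
{
	DBG("compiled in at level %d\n", DEBUG_LEVEL);
	VDBG("only at level 2\n");	/* expands to nothing here */
	return 0;
}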
+#endif + +#define PKT_BUF_LIST 0x89 + +/* + * device types + */ +#define PACKET_CDR 1 +#define PACKET_CDRW 2 +#define PACKET_DVDR 3 +#define PACKET_DVDRW 4 + +/* + * flags + */ +#define PACKET_WRITEABLE 1 /* pd is writeable */ +#define PACKET_NWA_VALID 2 /* next writeable address valid */ +#define PACKET_LRA_VALID 3 /* last recorded address valid */ +#define PACKET_READY 4 +#define PACKET_READONLY 5 /* read only pd */ +#define PACKET_THREAD 6 /* kernel thread running */ +#define PACKET_RQ 7 /* current rq is set */ +#define PACKET_BUSY 8 /* current rq is being processed */ +#define PACKET_LOCK 9 /* pd is locked (wrt pd->rq) */ + +/* + * Disc status -- from READ_DISC_INFO + */ +#define PACKET_DISC_EMPTY 0 +#define PACKET_DISC_INCOMPLETE 1 +#define PACKET_DISC_COMPLETE 2 +#define PACKET_DISC_OTHER 3 + +/* + * write type, and corresponding data block type + */ +#define PACKET_MODE1 1 +#define PACKET_MODE2 2 +#define PACKET_BLOCK_MODE1 8 +#define PACKET_BLOCK_MODE2 10 + +/* + * Last session/border status + */ +#define PACKET_SESSION_EMPTY 0 +#define PACKET_SESSION_INCOMPLETE 1 +#define PACKET_SESSION_RESERVED 2 +#define PACKET_SESSION_COMPLETE 3 + +#define PACKET_MCN "4a656e734178626f65323030300000" + +#undef PACKET_USE_LS + +/* + * Very crude stats for now + */ +struct packet_stats +{ + unsigned long bh_s; + unsigned long bh_e; + unsigned long secs_w; + unsigned long secs_r; +}; + +/* + * packet ioctls + */ +#define PACKET_IOCTL_MAGIC ('X') +#define PACKET_GET_STATS _IOR(PACKET_IOCTL_MAGIC, 0, struct packet_stats) +#define PACKET_SETUP_DEV _IOW(PACKET_IOCTL_MAGIC, 1, unsigned int) +#define PACKET_TEARDOWN_DEV _IOW(PACKET_IOCTL_MAGIC, 2, unsigned int) +#define PACKET_WAKEUP _IO(PACKET_IOCTL_MAGIC, 3) + +#ifdef __KERNEL__ +#include + +struct packet_settings +{ + __u8 size; /* packet size in frames */ + __u8 fp; /* fixed packets */ + __u8 link_loss; /* the rest is specified + * as per Mt Fuji */ + __u8 write_type; + __u8 track_mode; + __u8 block_mode; +}; + +struct packet_cdrw +{ + struct buffer_head *bhlist; /* string of bhs */ + atomic_t free_bh; + atomic_t pending_bh; + merge_request_fn *front_merge_fn; + merge_request_fn *back_merge_fn; + merge_request_fn *hole_merge_fn; + merge_requests_fn *merge_requests_fn; + request_queue_t r_queue; + void *queuedata; + pid_t pid; +}; + +struct pktcdvd_device +{ + kdev_t dev; /* dev attached */ + kdev_t pkt_dev; /* our dev */ + char name[20]; + struct cdrom_device_info *cdi; /* cdrom matching dev */ + struct packet_settings settings; + struct packet_stats stats; + atomic_t refcnt; + __u8 speed; /* cur write speed */ + unsigned long offset; /* start offset */ + __u8 mode_offset; /* 0 / 8 */ + __u8 type; + unsigned long flags; + __u8 disc_status; + __u8 track_status; /* last one */ + __u32 nwa; /* next writable address */ + __u32 lra; /* last recorded address */ + spinlock_t lock; + struct packet_cdrw cdrw; + struct dentry *pkt_dentry; + wait_queue_head_t wqueue; + wait_queue_head_t lock_wait; + struct request *rq; /* current request */ +}; + +/* + * following possibly belongs in cdrom.h + */ + +struct cdvd_capacity +{ + __u32 lba; + __u32 block_length; +}; + +extern inline void pd_lock(struct pktcdvd_device *pd, int spin) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&pd->lock_wait, &wait); + + do { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!test_and_set_bit(PACKET_LOCK, &pd->flags)) + break; + if (!spin) + schedule(); + } while (!test_and_set_bit(PACKET_LOCK, &pd->flags)); + + set_task_state(tsk, 
TASK_RUNNING); + remove_wait_queue(&pd->lock_wait, &wait); +} + +extern inline void pd_unlock(struct pktcdvd_device *pd) +{ + clear_bit(PACKET_LOCK, &pd->flags); + wmb(); + wake_up(&pd->lock_wait); +} + +void pkt_elevator_merge_req(struct request *rq, struct request *nxt) {} +void pkt_elevator_cleanup(request_queue_t *q, struct request *rq, int ret, int front) {} + +#define ELEVATOR_PKTCDVD \ +((elevator_t) { \ + 0, /* read_latency */ \ + 0, /* write_latency */ \ + \ + pkt_elevator, /* elevator_fn */ \ + pkt_elevator_merge, /* elevator_merge_fn */ \ + pkt_elevator_cleanup, \ + pkt_elevator_merge_req, \ + }) + +#endif /* __KERNEL__ */ + +#endif /* PKTCDVD_H */ diff -urN --exclude-from /tmp/exclude /opt/kernel/linux-2.4.0-test10/mm/filemap.c linux/mm/filemap.c --- /opt/kernel/linux-2.4.0-test10/mm/filemap.c Tue Oct 31 06:12:36 2000 +++ linux/mm/filemap.c Tue Oct 31 06:14:12 2000 @@ -855,10 +855,6 @@ * accessed sequentially. */ if (ahead) { - if (reada_ok == 2) { - run_task_queue(&tq_disk); - } - filp->f_ralen += ahead; filp->f_rawin += filp->f_ralen; filp->f_raend = raend + ahead + 1;
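/*
 * Editor's sketch (userspace; illustrative only): driving the packet
 * ioctls defined in pktcdvd.h above from a control program. The device
 * node /dev/pktcdvd0, the choice of sr0 (major 11) as the underlying
 * writer, and passing the kdev_t by pointer are all assumptions -- the
 * real calling convention is fixed by the pktcdvd.c driver, not
 * visible in the header.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>	/* pktcdvd.h uses _IOR/_IOW */
#include <linux/pktcdvd.h>

int main(void)
{
	unsigned int cd_dev = (11 << 8) | 0;	/* assumed kdev_t of sr0 */
	struct packet_stats st;
	int fd;

	fd = open("/dev/pktcdvd0", O_RDONLY);	/* hypothetical node, major 97 */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, PACKET_SETUP_DEV, &cd_dev) < 0)
		perror("PACKET_SETUP_DEV");
	if (ioctl(fd, PACKET_GET_STATS, &st) == 0)
		printf("stats: bh_s=%lu bh_e=%lu\n", st.bh_s, st.bh_e);
	if (ioctl(fd, PACKET_TEARDOWN_DEV, &cd_dev) < 0)
		perror("PACKET_TEARDOWN_DEV");
	close(fd);
	return 0;
}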