--- /opt/kernel/linux-2.4.5/drivers/block/cpqarray.c Sat May 26 13:30:47 2001 +++ linux/drivers/block/cpqarray.c Mon May 28 02:33:10 2001 @@ -363,7 +363,7 @@ static int cpq_back_merge_fn(request_queue_t *q, struct request *rq, struct buffer_head *bh, int max_segments) { - if (rq->bhtail->b_data + rq->bhtail->b_size == bh->b_data) + if (bh_bus(rq->bhtail) + rq->bhtail->b_size == bh_bus(bh)) return 1; return cpq_new_segment(q, rq, max_segments); } @@ -371,7 +371,7 @@ static int cpq_front_merge_fn(request_queue_t *q, struct request *rq, struct buffer_head *bh, int max_segments) { - if (bh->b_data + bh->b_size == rq->bh->b_data) + if (bh_bus(bh) + bh->b_size == bh_bus(rq->bh)) return 1; return cpq_new_segment(q, rq, max_segments); } @@ -381,7 +381,7 @@ { int total_segments = rq->nr_segments + nxt->nr_segments; - if (rq->bhtail->b_data + rq->bhtail->b_size == nxt->bh->b_data) + if (bh_bus(rq->bhtail) + rq->bhtail->b_size == bh_bus(nxt->bh)) total_segments--; if (total_segments > SG_MAX) @@ -528,6 +528,7 @@ q = BLK_DEFAULT_QUEUE(MAJOR_NR + i); q->queuedata = hba[i]; blk_init_queue(q, do_ida_request); + blk_queue_bounce_limit(q, BLK_BOUNCE_4G); blk_queue_headactive(q, 0); blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256); hardsect_size[MAJOR_NR+i] = ida_hardsizes + (i*256); @@ -919,17 +920,22 @@ ctlr_info_t *h = q->queuedata; cmdlist_t *c; int seg, sect; - char *lastdataend; + unsigned long lastdataend; struct list_head * queue_head = &q->queue_head; struct buffer_head *bh; struct request *creq; struct my_sg tmp_sg[SG_MAX]; int i; -// Loop till the queue is empty if or it is plugged + if (q->plugged) { + start_io(h); + return; + } + +// Loop till the queue is empty while (1) { - if (q->plugged || list_empty(queue_head)) { + if (list_empty(queue_head)) { start_io(h); return; } @@ -969,19 +975,20 @@ printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); ); - seg = 0; lastdataend = NULL; + seg = lastdataend = 0; sect = 0; while(bh) { sect += bh->b_size/512; 
- if (bh->b_data == lastdataend) { + if (bh_bus(bh) == lastdataend) { tmp_sg[seg-1].size += bh->b_size; lastdataend += bh->b_size; } else { if (seg == SG_MAX) BUG(); + tmp_sg[seg].page = bh->b_page; tmp_sg[seg].size = bh->b_size; - tmp_sg[seg].start_addr = bh->b_data; - lastdataend = bh->b_data + bh->b_size; + tmp_sg[seg].offset = bh_offset(bh); + lastdataend = bh_bus(bh) + bh->b_size; seg++; } bh = bh->b_reqnext; @@ -990,9 +997,9 @@ for( i=0; i < seg; i++) { c->req.sg[i].size = tmp_sg[i].size; - c->req.sg[i].addr = (__u32) pci_map_single( - h->pci_dev, tmp_sg[i].start_addr, - tmp_sg[i].size, + c->req.sg[i].addr = (__u32) pci_map_page( + h->pci_dev, tmp_sg[i].page, tmp_sg[i].size, + tmp_sg[i].offset, (creq->cmd == READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); } @@ -1099,7 +1106,7 @@ /* unmap the DMA mapping for all the scatter gather elements */ for(i=0; i<cmd->req.hdr.sg_cnt; i++) { - pci_unmap_single(hba[cmd->ctlr]->pci_dev, + pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr, cmd->req.sg[i].size, (cmd->req.hdr.cmd == IDA_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); } --- /opt/kernel/linux-2.4.5/drivers/block/cpqarray.h Sat May 26 13:30:47 2001 +++ linux/drivers/block/cpqarray.h Mon May 28 02:25:18 2001 @@ -57,8 +57,9 @@ #ifdef __KERNEL__ struct my_sg { - int size; - char *start_addr; + struct page *page; + unsigned short size; + unsigned short offset; }; struct ctlr_info;