diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/aic7xxx/aic7xxx_linux_host.h linux/drivers/scsi/aic7xxx/aic7xxx_linux_host.h
--- /opt/kernel/linux-2.4.5/drivers/scsi/aic7xxx/aic7xxx_linux_host.h	Sat May 26 13:30:49 2001
+++ linux/drivers/scsi/aic7xxx/aic7xxx_linux_host.h	Sun May 27 17:50:26 2001
@@ -81,7 +81,8 @@
 	present: 0,			/* number of 7xxx's present   */	\
 	unchecked_isa_dma: 0,		/* no memory DMA restrictions */	\
 	use_clustering: ENABLE_CLUSTERING,					\
-	use_new_eh_code: 1							\
+	use_new_eh_code: 1,							\
+	can_dma_32: 1								\
 }
 
 #endif /* _AIC7XXX_LINUX_HOST_H_ */
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/hosts.c linux/drivers/scsi/hosts.c
--- /opt/kernel/linux-2.4.5/drivers/scsi/hosts.c	Mon Oct 30 23:44:29 2000
+++ linux/drivers/scsi/hosts.c	Sun May 27 17:50:26 2001
@@ -230,6 +230,7 @@
     retval->cmd_per_lun = tpnt->cmd_per_lun;
     retval->unchecked_isa_dma = tpnt->unchecked_isa_dma;
     retval->use_clustering = tpnt->use_clustering;
+    retval->can_dma_32 = tpnt->can_dma_32;
 
     retval->select_queue_depths = tpnt->select_queue_depths;
 
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/hosts.h linux/drivers/scsi/hosts.h
--- /opt/kernel/linux-2.4.5/drivers/scsi/hosts.h	Sat Apr 28 00:49:20 2001
+++ linux/drivers/scsi/hosts.h	Mon May 28 02:13:02 2001
@@ -286,6 +286,8 @@
      */
     unsigned emulated:1;
 
+    unsigned can_dma_32:1;
+
     /*
      * Name of proc directory
      */
@@ -384,6 +386,7 @@
     unsigned in_recovery:1;
     unsigned unchecked_isa_dma:1;
     unsigned use_clustering:1;
+    unsigned can_dma_32:1;
     /*
      * True if this host was loaded as a loadable module
      */
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c
--- /opt/kernel/linux-2.4.5/drivers/scsi/scsi.c	Sat May 26 13:30:49 2001
+++ linux/drivers/scsi/scsi.c	Mon May 28 17:09:04 2001
@@ -176,10 +176,13 @@
  * handler in the list - ultimately they call scsi_request_fn
  * to do the dirty deed.
  */
-void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
-	blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
-	blk_queue_headactive(&SDpnt->request_queue, 0);
-	SDpnt->request_queue.queuedata = (void *) SDpnt;
+void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
+{
+	request_queue_t *q = &SDpnt->request_queue;
+
+	blk_init_queue(q, scsi_request_fn);
+	blk_queue_headactive(q, 0);
+	q->queuedata = (void *) SDpnt;
 }
 
 #ifdef MODULE
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/scsi.h linux/drivers/scsi/scsi.h
--- /opt/kernel/linux-2.4.5/drivers/scsi/scsi.h	Sat Apr 28 00:49:19 2001
+++ linux/drivers/scsi/scsi.h	Mon May 28 17:09:01 2001
@@ -391,7 +391,7 @@
 #define CONTIGUOUS_BUFFERS(X,Y) \
 (virt_to_phys((X)->b_data+(X)->b_size-1)+1==virt_to_phys((Y)->b_data))
 #else
-#define CONTIGUOUS_BUFFERS(X,Y) ((X->b_data+X->b_size) == Y->b_data)
+#define CONTIGUOUS_BUFFERS(X,Y) (bh_bus((X)) + (X)->b_size == bh_bus((Y)))
 #endif
 
 
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c
--- /opt/kernel/linux-2.4.5/drivers/scsi/scsi_lib.c	Sat May 26 13:30:49 2001
+++ linux/drivers/scsi/scsi_lib.c	Mon May 28 17:32:34 2001
@@ -406,7 +405,6 @@
 	q = &SCpnt->device->request_queue;
 
-	req->buffer = bh->b_data;
 	/*
 	 * Bleah.  Leftovers again.  Stick the leftovers in
 	 * the front of the queue, and goose the queue again.
 	 */
@@ -485,6 +484,8 @@
  */
 static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
 {
+	struct request *req = &SCpnt->request;
+
 	ASSERT_LOCK(&io_request_lock, 0);
 
 	/*
@@ -503,9 +504,8 @@
 		}
 		scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
 	} else {
-		if (SCpnt->request_buffer != SCpnt->request.buffer) {
-			scsi_free(SCpnt->request_buffer, SCpnt->request_bufflen);
-		}
+		if (SCpnt->request_buffer != req->buffer)
+			scsi_free(SCpnt->request_buffer,SCpnt->request_bufflen);
 	}
 
 	/*
@@ -541,6 +541,7 @@
 	int result = SCpnt->result;
 	int this_count = SCpnt->bufflen >> 9;
 	request_queue_t *q = &SCpnt->device->request_queue;
+	struct request *req = &SCpnt->request;
 
 	/*
 	 * We must do one of several things here:
@@ -570,7 +571,7 @@
 
 		for (i = 0; i < SCpnt->use_sg; i++) {
 			if (sgpnt[i].alt_address) {
-				if (SCpnt->request.cmd == READ) {
+				if (req->cmd == READ) {
 					memcpy(sgpnt[i].alt_address,
 					       sgpnt[i].address,
 					       sgpnt[i].length);
@@ -580,10 +581,11 @@
 		}
 		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
 	} else {
-		if (SCpnt->buffer != SCpnt->request.buffer) {
-			if (SCpnt->request.cmd == READ) {
-				memcpy(SCpnt->request.buffer, SCpnt->buffer,
-				       SCpnt->bufflen);
+		if (SCpnt->buffer != req->buffer) {
+			if (req->cmd == READ) {
+				char *to = bh_kmap_irq(req->bh);
+				memcpy(to, SCpnt->buffer, SCpnt->bufflen);
+				bh_kunmap_irq(to);
 			}
 			scsi_free(SCpnt->buffer, SCpnt->bufflen);
 		}
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c
--- /opt/kernel/linux-2.4.5/drivers/scsi/scsi_merge.c	Fri Feb  9 20:30:23 2001
+++ linux/drivers/scsi/scsi_merge.c	Mon May 28 17:34:46 2001
@@ -6,6 +6,7 @@
  * Based upon conversations with large numbers
  * of people at Linux Expo.
  * Support for dynamic DMA mapping: Jakub Jelinek (jakub@redhat.com).
+ * Support for highmem I/O: Jens Axboe
  */
 
 /*
@@ -95,7 +96,7 @@
 			printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
 			       bh,
 			       bh->b_size >> 9,
-			       virt_to_phys(bh->b_data - 1));
+			       bh_bus(bh) - 1);
 		}
 		panic("Ththththaats all folks.  Too dangerous to continue.\n");
 	}
@@ -223,8 +224,7 @@
 			 * DMA capable host, make sure that a segment doesn't span
 			 * the DMA threshold boundary.
 			 */
-			if (dma_host &&
-			    virt_to_phys(bhnext->b_data) - 1 == ISA_DMA_THRESHOLD) {
+			if (dma_host && bh_bus(bhnext) - 1 == ISA_DMA_THRESHOLD) {
 				ret++;
 				reqsize = bhnext->b_size;
 			} else if (CONTIGUOUS_BUFFERS(bh, bhnext)) {
@@ -241,8 +241,7 @@
 				 * kind of screwed and we need to start
 				 * another segment.
 				 */
-				if( dma_host
-				    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD
+				if( dma_host && bh_bus(bh) - 1 >= ISA_DMA_THRESHOLD
 				    && reqsize + bhnext->b_size > PAGE_SIZE )
 				{
 					ret++;
@@ -304,7 +303,7 @@
 }
 
 #define MERGEABLE_BUFFERS(X,Y) \
-(((((long)(X)->b_data+(X)->b_size)|((long)(Y)->b_data)) & \
+(((((long)bh_bus((X))+(X)->b_size)|((long)bh_bus((Y)))) & \
   (DMA_CHUNK_SIZE - 1)) == 0)
 
 #ifdef DMA_CHUNK_SIZE
@@ -424,14 +423,11 @@
 	 * DMA capable host, make sure that a segment doesn't span
 	 * the DMA threshold boundary.
 	 */
-	if (dma_host &&
-	    virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
+	if (dma_host && bh_bus(req->bhtail) - 1 == ISA_DMA_THRESHOLD)
 		goto new_end_segment;
-	}
 	if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
 #ifdef DMA_SEGMENT_SIZE_LIMITED
-		if( dma_host
-		    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+		if (dma_host && bh_bus(bh) - 1 >= ISA_DMA_THRESHOLD) {
 			segment_size = 0;
 			count = __count_segments(req, use_clustering, dma_host, &segment_size);
 			if( segment_size + bh->b_size > PAGE_SIZE ) {
@@ -480,14 +476,12 @@
 	 * DMA capable host, make sure that a segment doesn't span
 	 * the DMA threshold boundary.
 	 */
-	if (dma_host &&
-	    virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
+	if (dma_host && bh_bus(bh) - 1 == ISA_DMA_THRESHOLD) {
 		goto new_start_segment;
 	}
 	if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
 #ifdef DMA_SEGMENT_SIZE_LIMITED
-		if( dma_host
-		    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+		if (dma_host && bh_bus(bh) - 1 >= ISA_DMA_THRESHOLD) {
 			segment_size = bh->b_size;
 			count = __count_segments(req, use_clustering, dma_host, &segment_size);
 			if( count != req->nr_segments ) {
@@ -635,10 +629,8 @@
 	 * DMA capable host, make sure that a segment doesn't span
 	 * the DMA threshold boundary.
 	 */
-	if (dma_host &&
-	    virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
+	if (dma_host && bh_bus(req->bhtail) - 1 == ISA_DMA_THRESHOLD)
 		goto dont_combine;
-	}
 #ifdef DMA_SEGMENT_SIZE_LIMITED
 	/*
 	 * We currently can only allocate scatter-gather bounce
@@ -646,7 +638,7 @@
 	 */
 	if (dma_host
 	    && CONTIGUOUS_BUFFERS(req->bhtail, next->bh)
-	    && virt_to_phys(req->bhtail->b_data) - 1 >= ISA_DMA_THRESHOLD )
+	    && bh_bus(req->bhtail) - 1 >= ISA_DMA_THRESHOLD )
 	{
 		int segment_size = 0;
 		int count = 0;
@@ -791,29 +783,6 @@
 	struct scatterlist * sgpnt;
 	int this_count;
 
-	/*
-	 * FIXME(eric) - don't inline this - it doesn't depend on the
-	 * integer flags. Come to think of it, I don't think this is even
-	 * needed any more.  Need to play with it and see if we hit the
-	 * panic.  If not, then don't bother.
-	 */
-	if (!SCpnt->request.bh) {
-		/*
-		 * Case of page request (i.e. raw device), or unlinked buffer
-		 * Typically used for swapping, but this isn't how we do
-		 * swapping any more.
-		 */
-		panic("I believe this is dead code.  If we hit this, I was wrong");
-#if 0
-		SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9;
-		SCpnt->request_buffer = SCpnt->request.buffer;
-		SCpnt->use_sg = 0;
-		/*
-		 * FIXME(eric) - need to handle DMA here.
-		 */
-#endif
-		return 1;
-	}
 	req = &SCpnt->request;
 	/*
 	 * First we need to know how many scatter gather segments are needed.
@@ -830,24 +799,16 @@
 	 * buffer.
 	 */
 	if (dma_host && scsi_dma_free_sectors <= 10) {
-		this_count = SCpnt->request.current_nr_sectors;
-		goto single_segment;
-	}
-	/*
-	 * Don't bother with scatter-gather if there is only one segment.
-	 */
-	if (count == 1) {
-		this_count = SCpnt->request.nr_sectors;
+		this_count = req->current_nr_sectors;
 		goto single_segment;
 	}
-	SCpnt->use_sg = count;
 
 	/*
 	 * Allocate the actual scatter-gather table itself.
 	 * scsi_malloc can only allocate in chunks of 512 bytes
 	 */
-	SCpnt->sglist_len = (SCpnt->use_sg
-			     * sizeof(struct scatterlist) + 511) & ~511;
+	SCpnt->use_sg = count;
+	SCpnt->sglist_len = (count * sizeof(struct scatterlist) + 511) & ~511;
 
 	sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
 
@@ -860,7 +821,7 @@
 		 * simply write the first buffer all by itself.
 		 */
 		printk("Warning - running *really* short on DMA buffers\n");
-		this_count = SCpnt->request.current_nr_sectors;
+		this_count = req->current_nr_sectors;
 		goto single_segment;
 	}
 	/*
@@ -872,11 +833,9 @@
 	SCpnt->request_bufflen = 0;
 	bhprev = NULL;
 
-	for (count = 0, bh = SCpnt->request.bh;
-	     bh; bh = bh->b_reqnext) {
+	for (count = 0, bh = req->bh; bh; bh = bh->b_reqnext) {
 		if (use_clustering && bhprev != NULL) {
-			if (dma_host &&
-			    virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
+			if (dma_host && bh_bus(bhprev) - 1 == ISA_DMA_THRESHOLD) {
 				/* Nothing - fall through */
 			} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
 				/*
@@ -887,7 +846,7 @@
 				 */
 				if( dma_host ) {
 #ifdef DMA_SEGMENT_SIZE_LIMITED
-					if( virt_to_phys(bh->b_data) - 1 < ISA_DMA_THRESHOLD
+					if (bh_bus(bh) - 1 < ISA_DMA_THRESHOLD
 					    || sgpnt[count - 1].length + bh->b_size <= PAGE_SIZE ) {
 						sgpnt[count - 1].length += bh->b_size;
 						bhprev = bh;
@@ -906,12 +865,12 @@
 				}
 			}
 		}
-		count++;
-		sgpnt[count - 1].address = bh->b_data;
-		sgpnt[count - 1].length += bh->b_size;
-		if (!dma_host) {
+
+		set_bh_sg(&sgpnt[count], bh);
+		if (!dma_host)
 			SCpnt->request_bufflen += bh->b_size;
-		}
+
+		count++;
 		bhprev = bh;
 	}
 
@@ -934,6 +893,10 @@
 	for (i = 0; i < count; i++) {
 		sectors = (sgpnt[i].length >> 9);
 		SCpnt->request_bufflen += sgpnt[i].length;
+		/*
+		 * only done for dma_host, in which case .page is not
+		 * set since it's guaranteed to be a low memory page
+		 */
 		if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
 		    ISA_DMA_THRESHOLD) {
 			if( scsi_dma_free_sectors - sectors <= 10 ) {
@@ -969,7 +932,7 @@
 			}
 			break;
 		}
-		if (SCpnt->request.cmd == WRITE) {
+		if (req->cmd == WRITE) {
 			memcpy(sgpnt[i].address, sgpnt[i].alt_address,
 			       sgpnt[i].length);
 		}
@@ -1014,8 +977,7 @@
 	 * single-block requests if we had hundreds of free sectors.
 	 */
 	if( scsi_dma_free_sectors > 30 ) {
-		for (this_count = 0, bh = SCpnt->request.bh;
-		     bh; bh = bh->b_reqnext) {
+		for (this_count = 0, bh = req->bh; bh; bh = bh->b_reqnext) {
 			if( scsi_dma_free_sectors - this_count < 30
 			    || this_count == sectors )
 			{
@@ -1028,7 +990,7 @@
 		/*
 		 * Yow!  Take the absolute minimum here.
 		 */
-		this_count = SCpnt->request.current_nr_sectors;
+		this_count = req->current_nr_sectors;
 	}
 
 	/*
@@ -1041,28 +1003,30 @@
 	 * segment.  Possibly the entire request, or possibly a small
 	 * chunk of the entire request.
 	 */
-	bh = SCpnt->request.bh;
-	buff = SCpnt->request.buffer;
+	bh = req->bh;
+	buff = req->buffer = bh->b_data;
 
-	if (dma_host) {
+	if (dma_host || PageHighMem(bh->b_page)) {
 		/*
 		 * Allocate a DMA bounce buffer.  If the allocation fails, fall
 		 * back and allocate a really small one - enough to satisfy
 		 * the first buffer.
 		 */
-		if (virt_to_phys(SCpnt->request.bh->b_data)
-		    + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
+		if (bh_bus(bh) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
 			buff = (char *) scsi_malloc(this_count << 9);
 			if (!buff) {
 				printk("Warning - running low on DMA memory\n");
-				this_count = SCpnt->request.current_nr_sectors;
+				this_count = req->current_nr_sectors;
 				buff = (char *) scsi_malloc(this_count << 9);
 				if (!buff) {
 					dma_exhausted(SCpnt, 0);
 				}
 			}
-			if (SCpnt->request.cmd == WRITE)
-				memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
+			if (req->cmd == WRITE) {
+				char *buf = bh_kmap_irq(bh);
+				memcpy(buff, buf, this_count << 9);
+				bh_kunmap_irq(buf);
+			}
 		}
 	}
 	SCpnt->request_bufflen = this_count << 9;
@@ -1110,14 +1074,6 @@
 	q = &SDpnt->request_queue;
 
 	/*
-	 * If the host has already selected a merge manager, then don't
-	 * pick a new one.
-	 */
-#if 0
-	if (q->back_merge_fn && q->front_merge_fn)
-		return;
-#endif
-	/*
 	 * If this host has an unlimited tablesize, then don't bother with a
 	 * merge manager.  The whole point of the operation is to make sure
 	 * that requests don't grow too large, and this host isn't picky.
@@ -1149,4 +1105,14 @@
 		q->merge_requests_fn = scsi_merge_requests_fn_dc;
 		SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
 	}
+
+	/*
+	 * now enable highmem I/O, if appropriate
+	 */
+	if (SHpnt->can_dma_32 && (SDpnt->type == TYPE_DISK)) {
+		blk_queue_bounce_limit(q, BLK_BOUNCE_4G);
+		printk("SCSI: channel %d, id %d: using highmem\n",
+			SDpnt->channel, SDpnt->id);
+	} else
+		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.5/drivers/scsi/sym53c8xx.h linux/drivers/scsi/sym53c8xx.h
--- /opt/kernel/linux-2.4.5/drivers/scsi/sym53c8xx.h	Sat Apr 28 00:50:16 2001
+++ linux/drivers/scsi/sym53c8xx.h	Sun May 27 17:50:26 2001
@@ -96,7 +96,8 @@
 			this_id:        7,			\
 			sg_tablesize:   SCSI_NCR_SG_TABLESIZE,	\
 			cmd_per_lun:    SCSI_NCR_CMD_PER_LUN,	\
-			use_clustering: DISABLE_CLUSTERING}
+			use_clustering: DISABLE_CLUSTERING,	\
+			can_dma_32:     1}
 
 #else
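
For reference, the patch leans on a handful of helpers that come from the
companion block-highmem patch rather than from this diff: bh_bus(),
set_bh_sg(), bh_kmap_irq()/bh_kunmap_irq(), and blk_queue_bounce_limit()
with the BLK_BOUNCE_4G/BLK_BOUNCE_HIGH limits. The sketch below shows
roughly what they might look like, inferred purely from the call sites
above; the names match the patch, but the signatures and internals
(page_to_phys, bh_offset, the kmap_atomic slot, the sg .offset member)
are assumptions here, not the real definitions.

/*
 * Sketch only - inferred from the callers in this patch, not the
 * actual block-highmem definitions.
 */
#include <linux/fs.h>		/* struct buffer_head, bh_offset */
#include <linux/highmem.h>	/* kmap_atomic, PageHighMem */
#include <asm/io.h>		/* page_to_phys */
#include <asm/scatterlist.h>

/* bus address of the data a bh describes; usable for highmem pages
 * too, since it goes through b_page instead of b_data */
static inline unsigned long bh_bus(struct buffer_head *bh)
{
	return page_to_phys(bh->b_page) + bh_offset(bh);
}

/* fill one scatterlist entry from a bh: a low memory page keeps its
 * virtual address, a highmem page is described as page + offset with
 * .address left NULL (hence the ".page is not set" comment in the
 * dma_host path above) */
static inline void set_bh_sg(struct scatterlist *sg, struct buffer_head *bh)
{
	if (PageHighMem(bh->b_page)) {
		sg->address = NULL;
		sg->page = bh->b_page;
		sg->offset = bh_offset(bh);
	} else
		sg->address = bh->b_data;

	sg->length = bh->b_size;
}

/* short-lived kernel mapping of a (possibly highmem) bh for the
 * bounce buffer copies above; KM_BH_IRQ is an assumed km_type slot */
static inline char *bh_kmap_irq(struct buffer_head *bh)
{
	return (char *) kmap_atomic(bh->b_page, KM_BH_IRQ) + bh_offset(bh);
}

static inline void bh_kunmap_irq(char *buffer)
{
	kunmap_atomic((void *) ((unsigned long) buffer & PAGE_MASK), KM_BH_IRQ);
}

The driver-visible knob is just can_dma_32: scsi_initialize_merge_fn()
translates it into blk_queue_bounce_limit(q, BLK_BOUNCE_4G), so the block
layer stops bouncing pages the HBA can reach with 32-bit DMA, while the
BLK_BOUNCE_HIGH fallback keeps the old behaviour of bouncing everything
above low memory.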