diff -Nru a/Documentation/mmio_barrier.txt b/Documentation/mmio_barrier.txt --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/mmio_barrier.txt Fri Apr 26 00:01:27 2002 @@ -0,0 +1,15 @@ +On some platforms, so-called memory-mapped I/O is weakly ordered. For +example, the following might occur: + +CPU A writes 0x1 to Device #1 +CPU B writes 0x2 to Device #1 +Device #1 sees 0x2 +Device #1 sees 0x1 + +On such platforms, driver writers are responsible for ensuring that I/O +writes to memory-mapped addresses on their device arrive in the order +intended. The mmiob() macro is provided for this purpose. A typical use +of this macro might be immediately prior to the exit of a critical +section of code protected by spinlocks. This would ensure that subsequent +writes to I/O space arrived only after all prior writes (much like a +typical memory barrier op, mb(), only with respect to I/O). diff -Nru a/Makefile b/Makefile --- a/Makefile Fri Apr 26 00:01:26 2002 +++ b/Makefile Fri Apr 26 00:01:26 2002 @@ -88,7 +88,7 @@ CPPFLAGS := -D__KERNEL__ -I$(HPATH) -CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ +CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \ -fomit-frame-pointer -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS) diff -Nru a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c --- a/arch/i386/mm/fault.c Fri Apr 26 00:01:26 2002 +++ b/arch/i386/mm/fault.c Fri Apr 26 00:01:26 2002 @@ -27,8 +27,6 @@ extern void die(const char *,struct pt_regs *,long); -extern int console_loglevel; - /* * Ugly, ugly, but the goto's result in better assembly.. */ diff -Nru a/arch/ia64/Config.help b/arch/ia64/Config.help --- a/arch/ia64/Config.help Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/Config.help Fri Apr 26 00:01:27 2002 @@ -557,3 +557,8 @@ best used in conjunction with the NMI watchdog so that spinlock deadlocks are also debuggable. 
+CONFIG_IA64_GRANULE_16MB + IA64 identity-mapped regions use a large page size called "granules". + + Select "16MB" for a small granule size. + Select "64MB" for a large granule size. This is the current default. diff -Nru a/arch/ia64/config.in b/arch/ia64/config.in --- a/arch/ia64/config.in Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/config.in Fri Apr 26 00:01:27 2002 @@ -83,7 +83,7 @@ define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore. bool 'SMP support' CONFIG_SMP -tristate 'Support running of Linux/x86 binaries' CONFIG_IA32_SUPPORT +bool 'Support running of Linux/x86 binaries' CONFIG_IA32_SUPPORT bool 'Performance monitor support' CONFIG_PERFMON tristate '/proc/pal support' CONFIG_IA64_PALINFO tristate '/proc/efi/vars support' CONFIG_EFI_VARS @@ -123,6 +123,7 @@ source drivers/ieee1394/Config.in source drivers/message/i2o/Config.in source drivers/md/Config.in +source drivers/message/fusion/Config.in mainmenu_option next_comment comment 'ATA/IDE/MFM/RLL support' diff -Nru a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c --- a/arch/ia64/hp/common/sba_iommu.c Fri Apr 26 00:01:26 2002 +++ b/arch/ia64/hp/common/sba_iommu.c Fri Apr 26 00:01:26 2002 @@ -214,6 +214,7 @@ static struct sba_device *sba_list; static int sba_count; static int reserve_sba_gart = 1; +static struct pci_dev sac_only_dev; #define sba_sg_iova(sg) (sg->address) #define sba_sg_len(sg) (sg->length) @@ -950,7 +951,12 @@ if (ret) { memset(ret, 0, size); - *dma_handle = sba_map_single(hwdev, ret, size, 0); + /* + * REVISIT: if sba_map_single starts needing more + * than dma_mask from the device, this needs to be + * updated. + */ + *dma_handle = sba_map_single(&sac_only_dev, ret, size, 0); } return ret; @@ -1807,1854 +1813,10 @@ sba_dev->sba_hpa = hpa; /* - * We need to check for an AGP device, if we find one, then only - * use part of the IOVA space for PCI DMA, the rest is for GART. - * REVISIT for multiple IOC. 
+ * We pass this fake device from alloc_consistent to ensure + * we only use SAC for alloc_consistent mappings. */ - pci_for_each_dev(device) - agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); - - if (agp_found && reserve_sba_gart) - SBA_SET_AGP(sba_dev); - - sba_hw_init(sba_dev); - sba_common_init(sba_dev); - -#ifdef CONFIG_PROC_FS - { - struct proc_dir_entry * proc_mckinley_root; - - proc_mckinley_root = proc_mkdir("bus/mckinley",0); - create_proc_info_entry(sba_rev, 0, proc_mckinley_root, sba_proc_info); - create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map); - } -#endif -} - -static int __init -nosbagart (char *str) -{ - reserve_sba_gart = 0; - return 1; -} - -__setup("nosbagart",nosbagart); - -EXPORT_SYMBOL(sba_init); -EXPORT_SYMBOL(sba_map_single); -EXPORT_SYMBOL(sba_unmap_single); -EXPORT_SYMBOL(sba_map_sg); -EXPORT_SYMBOL(sba_unmap_sg); -EXPORT_SYMBOL(sba_dma_address); -EXPORT_SYMBOL(sba_alloc_consistent); -EXPORT_SYMBOL(sba_free_consistent); -/* -** IA64 System Bus Adapter (SBA) I/O MMU manager -** -** (c) Copyright 2002 Alex Williamson -** (c) Copyright 2002 Hewlett-Packard Company -** -** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) -** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) -** -** This program is free software; you can redistribute it and/or modify -** it under the terms of the GNU General Public License as published by -** the Free Software Foundation; either version 2 of the License, or -** (at your option) any later version. -** -** -** This module initializes the IOC (I/O Controller) found on HP -** McKinley machines and their successors. 
-** -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include /* ia64_get_itc() */ -#include -#include /* PAGE_OFFSET */ -#include - - -#define DRIVER_NAME "SBA" - -#ifndef CONFIG_IA64_HP_PROTO -#define ALLOW_IOV_BYPASS -#endif -#define ENABLE_MARK_CLEAN -/* -** The number of debug flags is a clue - this code is fragile. -*/ -#undef DEBUG_SBA_INIT -#undef DEBUG_SBA_RUN -#undef DEBUG_SBA_RUN_SG -#undef DEBUG_SBA_RESOURCE -#undef ASSERT_PDIR_SANITY -#undef DEBUG_LARGE_SG_ENTRIES -#undef DEBUG_BYPASS - -#define SBA_INLINE __inline__ -/* #define SBA_INLINE */ - -#ifdef DEBUG_SBA_INIT -#define DBG_INIT(x...) printk(x) -#else -#define DBG_INIT(x...) -#endif - -#ifdef DEBUG_SBA_RUN -#define DBG_RUN(x...) printk(x) -#else -#define DBG_RUN(x...) -#endif - -#ifdef DEBUG_SBA_RUN_SG -#define DBG_RUN_SG(x...) printk(x) -#else -#define DBG_RUN_SG(x...) -#endif - - -#ifdef DEBUG_SBA_RESOURCE -#define DBG_RES(x...) printk(x) -#else -#define DBG_RES(x...) -#endif - -#ifdef DEBUG_BYPASS -#define DBG_BYPASS(x...) printk(x) -#else -#define DBG_BYPASS(x...) -#endif - -#ifdef ASSERT_PDIR_SANITY -#define ASSERT(expr) \ - if(!(expr)) { \ - printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \ - panic(#expr); \ - } -#else -#define ASSERT(expr) -#endif - -#define KB(x) ((x) * 1024) -#define MB(x) (KB (KB (x))) -#define GB(x) (MB (KB (x))) - -/* -** The number of pdir entries to "free" before issueing -** a read to PCOM register to flush out PCOM writes. -** Interacts with allocation granularity (ie 4 or 8 entries -** allocated and free'd/purged at a time might make this -** less interesting). 
-*/ -#define DELAYED_RESOURCE_CNT 16 - -#define DEFAULT_DMA_HINT_REG 0 - -#define ZX1_FUNC_ID_VALUE ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP) -#define ZX1_MC_ID ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP) - -#define SBA_FUNC_ID 0x0000 /* function id */ -#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ - -#define SBA_FUNC_SIZE 0x10000 /* SBA configuration function reg set */ - -unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000, - 0x9000, 0xa000, -1}; - -#define SBA_IOC_OFFSET 0x1000 - -#define MAX_IOC 1 /* we only have 1 for now*/ - -#define IOC_IBASE 0x300 /* IO TLB */ -#define IOC_IMASK 0x308 -#define IOC_PCOM 0x310 -#define IOC_TCNFG 0x318 -#define IOC_PDIR_BASE 0x320 - -#define IOC_IOVA_SPACE_BASE 0x40000000 /* IOVA ranges start at 1GB */ - -/* -** IOC supports 4/8/16/64KB page sizes (see TCNFG register) -** It's safer (avoid memory corruption) to keep DMA page mappings -** equivalently sized to VM PAGE_SIZE. -** -** We really can't avoid generating a new mapping for each -** page since the Virtual Coherence Index has to be generated -** and updated for each page. -** -** IOVP_SIZE could only be greater than PAGE_SIZE if we are -** confident the drivers really only touch the next physical -** page iff that driver instance owns it. -*/ -#define IOVP_SIZE PAGE_SIZE -#define IOVP_SHIFT PAGE_SHIFT -#define IOVP_MASK PAGE_MASK - -struct ioc { - unsigned long ioc_hpa; /* I/O MMU base address */ - char *res_map; /* resource map, bit == pdir entry */ - u64 *pdir_base; /* physical base address */ - unsigned long ibase; /* pdir IOV Space base */ - unsigned long imask; /* pdir IOV Space mask */ - - unsigned long *res_hint; /* next avail IOVP - circular search */ - spinlock_t res_lock; - unsigned long hint_mask_pdir; /* bits used for DMA hints */ - unsigned int res_bitshift; /* from the RIGHT! 
*/ - unsigned int res_size; /* size of resource map in bytes */ - unsigned int hint_shift_pdir; - unsigned long dma_mask; -#if DELAYED_RESOURCE_CNT > 0 - int saved_cnt; - struct sba_dma_pair { - dma_addr_t iova; - size_t size; - } saved[DELAYED_RESOURCE_CNT]; -#endif - -#ifdef CONFIG_PROC_FS -#define SBA_SEARCH_SAMPLE 0x100 - unsigned long avg_search[SBA_SEARCH_SAMPLE]; - unsigned long avg_idx; /* current index into avg_search */ - unsigned long used_pages; - unsigned long msingle_calls; - unsigned long msingle_pages; - unsigned long msg_calls; - unsigned long msg_pages; - unsigned long usingle_calls; - unsigned long usingle_pages; - unsigned long usg_calls; - unsigned long usg_pages; -#ifdef ALLOW_IOV_BYPASS - unsigned long msingle_bypass; - unsigned long usingle_bypass; - unsigned long msg_bypass; -#endif -#endif - - /* STUFF We don't need in performance path */ - unsigned int pdir_size; /* in bytes, determined by IOV Space size */ -}; - -struct sba_device { - struct sba_device *next; /* list of SBA's in system */ - const char *name; - unsigned long sba_hpa; /* base address */ - spinlock_t sba_lock; - unsigned int flags; /* state/functionality enabled */ - unsigned int hw_rev; /* HW revision of chip */ - - unsigned int num_ioc; /* number of on-board IOC's */ - struct ioc ioc[MAX_IOC]; -}; - - -static struct sba_device *sba_list; -static int sba_count; -static int reserve_sba_gart = 1; - -#define sba_sg_iova(sg) (sg->address) -#define sba_sg_len(sg) (sg->length) -#define sba_sg_buffer(sg) (sg->orig_address) - -/* REVISIT - fix me for multiple SBAs/IOCs */ -#define GET_IOC(dev) (sba_list->ioc) -#define SBA_SET_AGP(sba_dev) (sba_dev->flags |= 0x1) -#define SBA_GET_AGP(sba_dev) (sba_dev->flags & 0x1) - -/* -** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up -** (or rather not merge) DMA's into managable chunks. -** On parisc, this is more of the software/tuning constraint -** rather than the HW. 
I/O MMU allocation alogorithms can be -** faster with smaller size is (to some degree). -*/ -#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE) - -/* Looks nice and keeps the compiler happy */ -#define SBA_DEV(d) ((struct sba_device *) (d)) - -#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) - -/************************************ -** SBA register read and write support -** -** BE WARNED: register writes are posted. -** (ie follow writes which must reach HW with a read) -** -*/ -#define READ_REG(addr) __raw_readq(addr) -#define WRITE_REG(val, addr) __raw_writeq(val, addr) - -#ifdef DEBUG_SBA_INIT - -/** - * sba_dump_tlb - debugging only - print IOMMU operating parameters - * @hpa: base address of the IOMMU - * - * Print the size/location of the IO MMU PDIR. - */ -static void -sba_dump_tlb(char *hpa) -{ - DBG_INIT("IO TLB at 0x%p\n", (void *)hpa); - DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE)); - DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK)); - DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG)); - DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE)); - DBG_INIT("\n"); -} -#endif - - -#ifdef ASSERT_PDIR_SANITY - -/** - * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @msg: text to print ont the output line. - * @pide: pdir index. - * - * Print one entry of the IO MMU PDIR in human readable form. 
- */ -static void -sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) -{ - /* start printing from lowest pde in rval */ - u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]); - unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); - uint rcnt; - - /* printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", */ - printk("SBA: %s rp %p bit %d rval 0x%lx\n", - msg, rptr, pide & (BITS_PER_LONG - 1), *rptr); - - rcnt = 0; - while (rcnt < BITS_PER_LONG) { - printk("%s %2d %p %016Lx\n", - (rcnt == (pide & (BITS_PER_LONG - 1))) - ? " -->" : " ", - rcnt, ptr, *ptr ); - rcnt++; - ptr++; - } - printk("%s", msg); -} - - -/** - * sba_check_pdir - debugging only - consistency checker - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @msg: text to print ont the output line. - * - * Verify the resource map and pdir state is consistent - */ -static int -sba_check_pdir(struct ioc *ioc, char *msg) -{ - u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); - u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ - u64 *pptr = ioc->pdir_base; /* pdir ptr */ - uint pide = 0; - - while (rptr < rptr_end) { - u64 rval; - int rcnt; /* number of bits we might check */ - - rval = *rptr; - rcnt = 64; - - while (rcnt) { - /* Get last byte and highest bit from that */ - u32 pde = ((u32)((*pptr >> (63)) & 0x1)); - if ((rval & 0x1) ^ pde) - { - /* - ** BUMMER! -- res_map != pdir -- - ** Dump rval and matching pdir entries - */ - sba_dump_pdir_entry(ioc, msg, pide); - return(1); - } - rcnt--; - rval >>= 1; /* try the next bit */ - pptr++; - pide++; - } - rptr++; /* look at next word of res_map */ - } - /* It'd be nice if we always got here :^) */ - return 0; -} - - -/** - * sba_dump_sg - debugging only - print Scatter-Gather list - * @ioc: IO MMU structure which owns the pdir we are interested in. 
- * @startsg: head of the SG list - * @nents: number of entries in SG list - * - * print the SG list so we can verify it's correct by hand. - */ -static void -sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) -{ - while (nents-- > 0) { - printk(" %d : %08lx/%05x %p\n", - nents, - (unsigned long) sba_sg_iova(startsg), - sba_sg_len(startsg), - sba_sg_buffer(startsg)); - startsg++; - } -} -static void -sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) -{ - struct scatterlist *the_sg = startsg; - int the_nents = nents; - - while (the_nents-- > 0) { - if (sba_sg_buffer(the_sg) == 0x0UL) - sba_dump_sg(NULL, startsg, nents); - the_sg++; - } -} - -#endif /* ASSERT_PDIR_SANITY */ - - - - -/************************************************************** -* -* I/O Pdir Resource Management -* -* Bits set in the resource map are in use. -* Each bit can represent a number of pages. -* LSbs represent lower addresses (IOVA's). -* -***************************************************************/ -#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ - -/* Convert from IOVP to IOVA and vice versa. */ -#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir))) -#define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase)) - -/* FIXME : review these macros to verify correctness and usage */ -#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT) - -#define RESMAP_MASK(n) ~(~0UL << (n)) -#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) - - -/** - * sba_search_bitmap - find free space in IO PDIR resource bitmap - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @bits_wanted: number of entries we need. - * - * Find consecutive free bits in resource bitmap. - * Each bit represents one entry in the IO Pdir. - * Cool perf optimization: search for log2(size) bits at a time. 
- */ -static SBA_INLINE unsigned long -sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) -{ - unsigned long *res_ptr = ioc->res_hint; - unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); - unsigned long pide = ~0UL; - - ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); - ASSERT(res_ptr < res_end); - if (bits_wanted > (BITS_PER_LONG/2)) { - /* Search word at a time - no mask needed */ - for(; res_ptr < res_end; ++res_ptr) { - if (*res_ptr == 0) { - *res_ptr = RESMAP_MASK(bits_wanted); - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ - break; - } - } - /* point to the next word on next pass */ - res_ptr++; - ioc->res_bitshift = 0; - } else { - /* - ** Search the resource bit map on well-aligned values. - ** "o" is the alignment. - ** We need the alignment to invalidate I/O TLB using - ** SBA HW features in the unmap path. - */ - unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT); - uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); - unsigned long mask; - - if (bitshiftcnt >= BITS_PER_LONG) { - bitshiftcnt = 0; - res_ptr++; - } - mask = RESMAP_MASK(bits_wanted) << bitshiftcnt; - - DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); - while(res_ptr < res_end) - { - DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); - ASSERT(0 != mask); - if(0 == ((*res_ptr) & mask)) { - *res_ptr |= mask; /* mark resources busy! */ - pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); - pide <<= 3; /* convert to bit address */ - pide += bitshiftcnt; - break; - } - mask <<= o; - bitshiftcnt += o; - if (0 == mask) { - mask = RESMAP_MASK(bits_wanted); - bitshiftcnt=0; - res_ptr++; - } - } - /* look in the same word on the next pass */ - ioc->res_bitshift = bitshiftcnt + bits_wanted; - } - - /* wrapped ? 
*/ - if (res_end <= res_ptr) { - ioc->res_hint = (unsigned long *) ioc->res_map; - ioc->res_bitshift = 0; - } else { - ioc->res_hint = res_ptr; - } - return (pide); -} - - -/** - * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @size: number of bytes to create a mapping for - * - * Given a size, find consecutive unmarked and then mark those bits in the - * resource bit map. - */ -static int -sba_alloc_range(struct ioc *ioc, size_t size) -{ - unsigned int pages_needed = size >> IOVP_SHIFT; -#ifdef CONFIG_PROC_FS - unsigned long itc_start = ia64_get_itc(); -#endif - unsigned long pide; - - ASSERT(pages_needed); - ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE); - ASSERT(pages_needed <= BITS_PER_LONG); - ASSERT(0 == (size & ~IOVP_MASK)); - - /* - ** "seek and ye shall find"...praying never hurts either... - */ - - pide = sba_search_bitmap(ioc, pages_needed); - if (pide >= (ioc->res_size << 3)) { - pide = sba_search_bitmap(ioc, pages_needed); - if (pide >= (ioc->res_size << 3)) - panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa); - } - -#ifdef ASSERT_PDIR_SANITY - /* verify the first enable bit is clear */ - if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) { - sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); - } -#endif - - DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", - __FUNCTION__, size, pages_needed, pide, - (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), - ioc->res_bitshift ); - -#ifdef CONFIG_PROC_FS - { - unsigned long itc_end = ia64_get_itc(); - unsigned long tmp = itc_end - itc_start; - /* check for roll over */ - itc_start = (itc_end < itc_start) ? 
-(tmp) : (tmp); - } - ioc->avg_search[ioc->avg_idx++] = itc_start; - ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; - - ioc->used_pages += pages_needed; -#endif - - return (pide); -} - - -/** - * sba_free_range - unmark bits in IO PDIR resource bitmap - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @iova: IO virtual address which was previously allocated. - * @size: number of bytes to create a mapping for - * - * clear bits in the ioc's resource map - */ -static SBA_INLINE void -sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) -{ - unsigned long iovp = SBA_IOVP(ioc, iova); - unsigned int pide = PDIR_INDEX(iovp); - unsigned int ridx = pide >> 3; /* convert bit to byte address */ - unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); - - int bits_not_wanted = size >> IOVP_SHIFT; - - /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ - unsigned long m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); - - DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", - __FUNCTION__, (uint) iova, size, - bits_not_wanted, m, pide, res_ptr, *res_ptr); - -#ifdef CONFIG_PROC_FS - ioc->used_pages -= bits_not_wanted; -#endif - - ASSERT(m != 0); - ASSERT(bits_not_wanted); - ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE); - ASSERT(bits_not_wanted <= BITS_PER_LONG); - ASSERT((*res_ptr & m) == m); /* verify same bits are set */ - *res_ptr &= ~m; -} - - -/************************************************************** -* -* "Dynamic DMA Mapping" support (aka "Coherent I/O") -* -***************************************************************/ - -#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir) - - -/** - * sba_io_pdir_entry - fill in one IO PDIR entry - * @pdir_ptr: pointer to IO PDIR entry - * @vba: Virtual CPU address of buffer to map - * - * SBA Mapping Routine - * - * Given a virtual address (vba, arg1) sba_io_pdir_entry() - * loads the I/O PDIR entry pointed to by pdir_ptr 
(arg0). - * Each IO Pdir entry consists of 8 bytes as shown below - * (LSB == bit 0): - * - * 63 40 11 7 0 - * +-+---------------------+----------------------------------+----+--------+ - * |V| U | PPN[39:12] | U | FF | - * +-+---------------------+----------------------------------+----+--------+ - * - * V == Valid Bit - * U == Unused - * PPN == Physical Page Number - * - * The physical address fields are filled with the results of virt_to_phys() - * on the vba. - */ - -#if 1 -#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL) -#else -void SBA_INLINE -sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) -{ - *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); -} -#endif - -#ifdef ENABLE_MARK_CLEAN -/** - * Since DMA is i-cache coherent, any (complete) pages that were written via - * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to - * flush them when they get mapped into an executable vm-area. - */ -static void -mark_clean (void *addr, size_t size) -{ - unsigned long pg_addr, end; - - pg_addr = PAGE_ALIGN((unsigned long) addr); - end = (unsigned long) addr + size; - while (pg_addr + PAGE_SIZE <= end) { - struct page *page = virt_to_page(pg_addr); - set_bit(PG_arch_1, &page->flags); - pg_addr += PAGE_SIZE; - } -} -#endif - -/** - * sba_mark_invalid - invalidate one or more IO PDIR entries - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @iova: IO Virtual Address mapped earlier - * @byte_cnt: number of bytes this mapping covers. - * - * Marking the IO PDIR entry(ies) as Invalid and invalidate - * corresponding IO TLB entry. The PCOM (Purge Command Register) - * is to purge stale entries in the IO TLB when unmapping entries. - * - * The PCOM register supports purging of multiple pages, with a minium - * of 1 page and a maximum of 2GB. Hardware requires the address be - * aligned to the size of the range being purged. 
The size of the range - * must be a power of 2. The "Cool perf optimization" in the - * allocation routine helps keep that true. - */ -static SBA_INLINE void -sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) -{ - u32 iovp = (u32) SBA_IOVP(ioc,iova); - - int off = PDIR_INDEX(iovp); - - /* Must be non-zero and rounded up */ - ASSERT(byte_cnt > 0); - ASSERT(0 == (byte_cnt & ~IOVP_MASK)); - -#ifdef ASSERT_PDIR_SANITY - /* Assert first pdir entry is set */ - if (!(ioc->pdir_base[off] >> 60)) { - sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); - } -#endif - - if (byte_cnt <= IOVP_SIZE) - { - ASSERT(off < ioc->pdir_size); - - iovp |= IOVP_SHIFT; /* set "size" field for PCOM */ - - /* - ** clear I/O PDIR entry "valid" bit - ** Do NOT clear the rest - save it for debugging. - ** We should only clear bits that have previously - ** been enabled. - */ - ioc->pdir_base[off] &= ~(0x80000000000000FFULL); - } else { - u32 t = get_order(byte_cnt) + PAGE_SHIFT; - - iovp |= t; - ASSERT(t <= 31); /* 2GB! Max value of "size" field */ - - do { - /* verify this pdir entry is enabled */ - ASSERT(ioc->pdir_base[off] >> 63); - /* clear I/O Pdir entry "valid" bit first */ - ioc->pdir_base[off] &= ~(0x80000000000000FFULL); - off++; - byte_cnt -= IOVP_SIZE; - } while (byte_cnt > 0); - } - - WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM); -} - -/** - * sba_map_single - map one buffer and return IOVA for DMA - * @dev: instance of PCI owned by the driver that's asking. - * @addr: driver buffer to map. - * @size: number of bytes to map in driver buffer. - * @direction: R/W or both. 
- * - * See Documentation/DMA-mapping.txt - */ -dma_addr_t -sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) -{ - struct ioc *ioc; - unsigned long flags; - dma_addr_t iovp; - dma_addr_t offset; - u64 *pdir_start; - int pide; -#ifdef ALLOW_IOV_BYPASS - unsigned long pci_addr = virt_to_phys(addr); -#endif - - ioc = GET_IOC(dev); - ASSERT(ioc); - -#ifdef ALLOW_IOV_BYPASS - /* - ** Check if the PCI device can DMA to ptr... if so, just return ptr - */ - if ((pci_addr & ~dev->dma_mask) == 0) { - /* - ** Device is bit capable of DMA'ing to the buffer... - ** just return the PCI address of ptr - */ -#ifdef CONFIG_PROC_FS - spin_lock_irqsave(&ioc->res_lock, flags); - ioc->msingle_bypass++; - spin_unlock_irqrestore(&ioc->res_lock, flags); -#endif - DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", - dev->dma_mask, pci_addr); - return pci_addr; - } -#endif - - ASSERT(size > 0); - ASSERT(size <= DMA_CHUNK_SIZE); - - /* save offset bits */ - offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; - - /* round up to nearest IOVP_SIZE */ - size = (size + offset + ~IOVP_MASK) & IOVP_MASK; - - spin_lock_irqsave(&ioc->res_lock, flags); -#ifdef ASSERT_PDIR_SANITY - if (sba_check_pdir(ioc,"Check before sba_map_single()")) - panic("Sanity check failed"); -#endif - -#ifdef CONFIG_PROC_FS - ioc->msingle_calls++; - ioc->msingle_pages += size >> IOVP_SHIFT; -#endif - pide = sba_alloc_range(ioc, size); - iovp = (dma_addr_t) pide << IOVP_SHIFT; - - DBG_RUN("%s() 0x%p -> 0x%lx\n", - __FUNCTION__, addr, (long) iovp | offset); - - pdir_start = &(ioc->pdir_base[pide]); - - while (size > 0) { - ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ - sba_io_pdir_entry(pdir_start, (unsigned long) addr); - - DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); - - addr += IOVP_SIZE; - size -= IOVP_SIZE; - pdir_start++; - } - /* form complete address */ -#ifdef ASSERT_PDIR_SANITY - sba_check_pdir(ioc,"Check after sba_map_single()"); -#endif - 
spin_unlock_irqrestore(&ioc->res_lock, flags); - return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG); -} - -/** - * sba_unmap_single - unmap one IOVA and free resources - * @dev: instance of PCI owned by the driver that's asking. - * @iova: IOVA of driver buffer previously mapped. - * @size: number of bytes mapped in driver buffer. - * @direction: R/W or both. - * - * See Documentation/DMA-mapping.txt - */ -void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, - int direction) -{ - struct ioc *ioc; -#if DELAYED_RESOURCE_CNT > 0 - struct sba_dma_pair *d; -#endif - unsigned long flags; - dma_addr_t offset; - - ioc = GET_IOC(dev); - ASSERT(ioc); - -#ifdef ALLOW_IOV_BYPASS - if ((iova & ioc->imask) != ioc->ibase) { - /* - ** Address does not fall w/in IOVA, must be bypassing - */ -#ifdef CONFIG_PROC_FS - spin_lock_irqsave(&ioc->res_lock, flags); - ioc->usingle_bypass++; - spin_unlock_irqrestore(&ioc->res_lock, flags); -#endif - DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); - -#ifdef ENABLE_MARK_CLEAN - if (direction == PCI_DMA_FROMDEVICE) { - mark_clean(phys_to_virt(iova), size); - } -#endif - return; - } -#endif - offset = iova & ~IOVP_MASK; - - DBG_RUN("%s() iovp 0x%lx/%x\n", - __FUNCTION__, (long) iova, size); - - iova ^= offset; /* clear offset bits */ - size += offset; - size = ROUNDUP(size, IOVP_SIZE); - - spin_lock_irqsave(&ioc->res_lock, flags); -#ifdef CONFIG_PROC_FS - ioc->usingle_calls++; - ioc->usingle_pages += size >> IOVP_SHIFT; -#endif - -#if DELAYED_RESOURCE_CNT > 0 - d = &(ioc->saved[ioc->saved_cnt]); - d->iova = iova; - d->size = size; - if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) { - int cnt = ioc->saved_cnt; - while (cnt--) { - sba_mark_invalid(ioc, d->iova, d->size); - sba_free_range(ioc, d->iova, d->size); - d--; - } - ioc->saved_cnt = 0; - READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ - } -#else /* DELAYED_RESOURCE_CNT == 0 */ - sba_mark_invalid(ioc, iova, size); - sba_free_range(ioc, iova, size); 
- READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ -#endif /* DELAYED_RESOURCE_CNT == 0 */ -#ifdef ENABLE_MARK_CLEAN - if (direction == PCI_DMA_FROMDEVICE) { - u32 iovp = (u32) SBA_IOVP(ioc,iova); - int off = PDIR_INDEX(iovp); - void *addr; - - if (size <= IOVP_SIZE) { - addr = phys_to_virt(ioc->pdir_base[off] & - ~0xE000000000000FFFULL); - mark_clean(addr, size); - } else { - size_t byte_cnt = size; - - do { - addr = phys_to_virt(ioc->pdir_base[off] & - ~0xE000000000000FFFULL); - mark_clean(addr, min(byte_cnt, IOVP_SIZE)); - off++; - byte_cnt -= IOVP_SIZE; - - } while (byte_cnt > 0); - } - } -#endif - spin_unlock_irqrestore(&ioc->res_lock, flags); - - /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support. - ** For Astro based systems this isn't a big deal WRT performance. - ** As long as 2.4 kernels copyin/copyout data from/to userspace, - ** we don't need the syncdma. The issue here is I/O MMU cachelines - ** are *not* coherent in all cases. May be hwrev dependent. - ** Need to investigate more. - asm volatile("syncdma"); - */ -} - - -/** - * sba_alloc_consistent - allocate/map shared mem for DMA - * @hwdev: instance of PCI owned by the driver that's asking. - * @size: number of bytes mapped in driver buffer. - * @dma_handle: IOVA of new buffer. - * - * See Documentation/DMA-mapping.txt - */ -void * -sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) -{ - void *ret; - - if (!hwdev) { - /* only support PCI */ - *dma_handle = 0; - return 0; - } - - ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size)); - - if (ret) { - memset(ret, 0, size); - *dma_handle = sba_map_single(hwdev, ret, size, 0); - } - - return ret; -} - - -/** - * sba_free_consistent - free/unmap shared mem for DMA - * @hwdev: instance of PCI owned by the driver that's asking. - * @size: number of bytes mapped in driver buffer. - * @vaddr: virtual address IOVA of "consistent" buffer. - * @dma_handler: IO virtual address of "consistent" buffer. 
- * - * See Documentation/DMA-mapping.txt - */ -void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - sba_unmap_single(hwdev, dma_handle, size, 0); - free_pages((unsigned long) vaddr, get_order(size)); -} - - -/* -** Since 0 is a valid pdir_base index value, can't use that -** to determine if a value is valid or not. Use a flag to indicate -** the SG list entry contains a valid pdir index. -*/ -#define PIDE_FLAG 0x1UL - -#ifdef DEBUG_LARGE_SG_ENTRIES -int dump_run_sg = 0; -#endif - - -/** - * sba_fill_pdir - write allocated SG entries into IO PDIR - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @startsg: list of IOVA/size pairs - * @nents: number of entries in startsg list - * - * Take preprocessed SG list and write corresponding entries - * in the IO PDIR. - */ - -static SBA_INLINE int -sba_fill_pdir( - struct ioc *ioc, - struct scatterlist *startsg, - int nents) -{ - struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ - int n_mappings = 0; - u64 *pdirp = 0; - unsigned long dma_offset = 0; - - dma_sg--; - while (nents-- > 0) { - int cnt = sba_sg_len(startsg); - sba_sg_len(startsg) = 0; - -#ifdef DEBUG_LARGE_SG_ENTRIES - if (dump_run_sg) - printk(" %2d : %08lx/%05x %p\n", - nents, - (unsigned long) sba_sg_iova(startsg), cnt, - sba_sg_buffer(startsg) - ); -#else - DBG_RUN_SG(" %d : %08lx/%05x %p\n", - nents, - (unsigned long) sba_sg_iova(startsg), cnt, - sba_sg_buffer(startsg) - ); -#endif - /* - ** Look for the start of a new DMA stream - */ - if ((u64)sba_sg_iova(startsg) & PIDE_FLAG) { - u32 pide = (u64)sba_sg_iova(startsg) & ~PIDE_FLAG; - dma_offset = (unsigned long) pide & ~IOVP_MASK; - sba_sg_iova(startsg) = 0; - dma_sg++; - sba_sg_iova(dma_sg) = (char *)(pide | ioc->ibase); - pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]); - n_mappings++; - } - - /* - ** Look for a VCONTIG chunk - */ - if (cnt) { - unsigned long vaddr = (unsigned long) sba_sg_buffer(startsg); - 
ASSERT(pdirp); - - /* Since multiple Vcontig blocks could make up - ** one DMA stream, *add* cnt to dma_len. - */ - sba_sg_len(dma_sg) += cnt; - cnt += dma_offset; - dma_offset=0; /* only want offset on first chunk */ - cnt = ROUNDUP(cnt, IOVP_SIZE); -#ifdef CONFIG_PROC_FS - ioc->msg_pages += cnt >> IOVP_SHIFT; -#endif - do { - sba_io_pdir_entry(pdirp, vaddr); - vaddr += IOVP_SIZE; - cnt -= IOVP_SIZE; - pdirp++; - } while (cnt > 0); - } - startsg++; - } -#ifdef DEBUG_LARGE_SG_ENTRIES - dump_run_sg = 0; -#endif - return(n_mappings); -} - - -/* -** Two address ranges are DMA contiguous *iff* "end of prev" and -** "start of next" are both on a page boundry. -** -** (shift left is a quick trick to mask off upper bits) -*/ -#define DMA_CONTIG(__X, __Y) \ - (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL) - - -/** - * sba_coalesce_chunks - preprocess the SG list - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @startsg: list of IOVA/size pairs - * @nents: number of entries in startsg list - * - * First pass is to walk the SG list and determine where the breaks are - * in the DMA stream. Allocates PDIR entries but does not fill them. - * Returns the number of DMA chunks. - * - * Doing the fill seperate from the coalescing/allocation keeps the - * code simpler. Future enhancement could make one pass through - * the sglist do both. 
- */ -static SBA_INLINE int -sba_coalesce_chunks( struct ioc *ioc, - struct scatterlist *startsg, - int nents) -{ - struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ - unsigned long vcontig_len; /* len of VCONTIG chunk */ - unsigned long vcontig_end; - struct scatterlist *dma_sg; /* next DMA stream head */ - unsigned long dma_offset, dma_len; /* start/len of DMA stream */ - int n_mappings = 0; - - while (nents > 0) { - unsigned long vaddr = (unsigned long) (startsg->address); - - /* - ** Prepare for first/next DMA stream - */ - dma_sg = vcontig_sg = startsg; - dma_len = vcontig_len = vcontig_end = sba_sg_len(startsg); - vcontig_end += vaddr; - dma_offset = vaddr & ~IOVP_MASK; - - /* PARANOID: clear entries */ - sba_sg_buffer(startsg) = sba_sg_iova(startsg); - sba_sg_iova(startsg) = 0; - sba_sg_len(startsg) = 0; - - /* - ** This loop terminates one iteration "early" since - ** it's always looking one "ahead". - */ - while (--nents > 0) { - unsigned long vaddr; /* tmp */ - - startsg++; - - /* catch brokenness in SCSI layer */ - ASSERT(startsg->length <= DMA_CHUNK_SIZE); - - /* - ** First make sure current dma stream won't - ** exceed DMA_CHUNK_SIZE if we coalesce the - ** next entry. - */ - if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE) - break; - - /* - ** Then look for virtually contiguous blocks. - ** - ** append the next transaction? - */ - vaddr = (unsigned long) sba_sg_iova(startsg); - if (vcontig_end == vaddr) - { - vcontig_len += sba_sg_len(startsg); - vcontig_end += sba_sg_len(startsg); - dma_len += sba_sg_len(startsg); - sba_sg_buffer(startsg) = (char *)vaddr; - sba_sg_iova(startsg) = 0; - sba_sg_len(startsg) = 0; - continue; - } - -#ifdef DEBUG_LARGE_SG_ENTRIES - dump_run_sg = (vcontig_len > IOVP_SIZE); -#endif - - /* - ** Not virtually contigous. - ** Terminate prev chunk. - ** Start a new chunk. - ** - ** Once we start a new VCONTIG chunk, dma_offset - ** can't change. 
And we need the offset from the first - ** chunk - not the last one. Ergo Successive chunks - ** must start on page boundaries and dove tail - ** with it's predecessor. - */ - sba_sg_len(vcontig_sg) = vcontig_len; - - vcontig_sg = startsg; - vcontig_len = sba_sg_len(startsg); - - /* - ** 3) do the entries end/start on page boundaries? - ** Don't update vcontig_end until we've checked. - */ - if (DMA_CONTIG(vcontig_end, vaddr)) - { - vcontig_end = vcontig_len + vaddr; - dma_len += vcontig_len; - sba_sg_buffer(startsg) = (char *)vaddr; - sba_sg_iova(startsg) = 0; - continue; - } else { - break; - } - } - - /* - ** End of DMA Stream - ** Terminate last VCONTIG block. - ** Allocate space for DMA stream. - */ - sba_sg_len(vcontig_sg) = vcontig_len; - dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK; - ASSERT(dma_len <= DMA_CHUNK_SIZE); - sba_sg_iova(dma_sg) = (char *) (PIDE_FLAG - | (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT) - | dma_offset); - n_mappings++; - } - - return n_mappings; -} - - -/** - * sba_map_sg - map Scatter/Gather list - * @dev: instance of PCI owned by the driver that's asking. - * @sglist: array of buffer/length pairs - * @nents: number of entries in list - * @direction: R/W or both. 
- * - * See Documentation/DMA-mapping.txt - */ -int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, - int direction) -{ - struct ioc *ioc; - int coalesced, filled = 0; - unsigned long flags; -#ifdef ALLOW_IOV_BYPASS - struct scatterlist *sg; -#endif - - DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); - ioc = GET_IOC(dev); - ASSERT(ioc); - -#ifdef ALLOW_IOV_BYPASS - if (dev->dma_mask >= ioc->dma_mask) { - for (sg = sglist ; filled < nents ; filled++, sg++){ - sba_sg_buffer(sg) = sba_sg_iova(sg); - sba_sg_iova(sg) = (char *)virt_to_phys(sba_sg_buffer(sg)); - } -#ifdef CONFIG_PROC_FS - spin_lock_irqsave(&ioc->res_lock, flags); - ioc->msg_bypass++; - spin_unlock_irqrestore(&ioc->res_lock, flags); -#endif - return filled; - } -#endif - /* Fast path single entry scatterlists. */ - if (nents == 1) { - sba_sg_buffer(sglist) = sba_sg_iova(sglist); - sba_sg_iova(sglist) = (char *)sba_map_single(dev, - sba_sg_buffer(sglist), - sba_sg_len(sglist), direction); -#ifdef CONFIG_PROC_FS - /* - ** Should probably do some stats counting, but trying to - ** be precise quickly starts wasting CPU time. - */ -#endif - return 1; - } - - spin_lock_irqsave(&ioc->res_lock, flags); - -#ifdef ASSERT_PDIR_SANITY - if (sba_check_pdir(ioc,"Check before sba_map_sg()")) - { - sba_dump_sg(ioc, sglist, nents); - panic("Check before sba_map_sg()"); - } -#endif - -#ifdef CONFIG_PROC_FS - ioc->msg_calls++; -#endif - - /* - ** First coalesce the chunks and allocate I/O pdir space - ** - ** If this is one DMA stream, we can properly map using the - ** correct virtual address associated with each DMA page. - ** w/o this association, we wouldn't have coherent DMA! - ** Access to the virtual address is what forces a two pass algorithm. 
- */ - coalesced = sba_coalesce_chunks(ioc, sglist, nents); - - /* - ** Program the I/O Pdir - ** - ** map the virtual addresses to the I/O Pdir - ** o dma_address will contain the pdir index - ** o dma_len will contain the number of bytes to map - ** o address contains the virtual address. - */ - filled = sba_fill_pdir(ioc, sglist, nents); - -#ifdef ASSERT_PDIR_SANITY - if (sba_check_pdir(ioc,"Check after sba_map_sg()")) - { - sba_dump_sg(ioc, sglist, nents); - panic("Check after sba_map_sg()\n"); - } -#endif - - spin_unlock_irqrestore(&ioc->res_lock, flags); - - ASSERT(coalesced == filled); - DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); - - return filled; -} - - -/** - * sba_unmap_sg - unmap Scatter/Gather list - * @dev: instance of PCI owned by the driver that's asking. - * @sglist: array of buffer/length pairs - * @nents: number of entries in list - * @direction: R/W or both. - * - * See Documentation/DMA-mapping.txt - */ -void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, - int direction) -{ - struct ioc *ioc; -#ifdef ASSERT_PDIR_SANITY - unsigned long flags; -#endif - - DBG_RUN_SG("%s() START %d entries, %p,%x\n", - __FUNCTION__, nents, sba_sg_buffer(sglist), sglist->length); - - ioc = GET_IOC(dev); - ASSERT(ioc); - -#ifdef CONFIG_PROC_FS - ioc->usg_calls++; -#endif - -#ifdef ASSERT_PDIR_SANITY - spin_lock_irqsave(&ioc->res_lock, flags); - sba_check_pdir(ioc,"Check before sba_unmap_sg()"); - spin_unlock_irqrestore(&ioc->res_lock, flags); -#endif - - while (sba_sg_len(sglist) && nents--) { - - sba_unmap_single(dev, (dma_addr_t)sba_sg_iova(sglist), - sba_sg_len(sglist), direction); -#ifdef CONFIG_PROC_FS - /* - ** This leaves inconsistent data in the stats, but we can't - ** tell which sg lists were mapped by map_single and which - ** were coalesced to a single entry. The stats are fun, - ** but speed is more important. 
- */ - ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; -#endif - ++sglist; - } - - DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); - -#ifdef ASSERT_PDIR_SANITY - spin_lock_irqsave(&ioc->res_lock, flags); - sba_check_pdir(ioc,"Check after sba_unmap_sg()"); - spin_unlock_irqrestore(&ioc->res_lock, flags); -#endif - -} - -unsigned long -sba_dma_address (struct scatterlist *sg) -{ - return ((unsigned long)sba_sg_iova(sg)); -} - -/************************************************************** -* -* Initialization and claim -* -***************************************************************/ - - -static void -sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num) -{ - u32 iova_space_size, iova_space_mask; - void * pdir_base; - int pdir_size, iov_order, tcnfg; - - /* - ** Firmware programs the maximum IOV space size into the imask reg - */ - iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; -#ifdef CONFIG_IA64_HP_PROTO - if (!iova_space_size) - iova_space_size = GB(1); -#endif - - /* - ** iov_order is always based on a 1GB IOVA space since we want to - ** turn on the other half for AGP GART. 
- */ - iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT)); - ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); - - DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n", - __FUNCTION__, ioc->ioc_hpa, iova_space_size>>20, - iov_order + PAGE_SHIFT, ioc->pdir_size); - - /* FIXME : DMA HINTs not used */ - ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; - ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); - - ioc->pdir_base = - pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size)); - if (NULL == pdir_base) - { - panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__); - } - memset(pdir_base, 0, pdir_size); - - DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n", - __FUNCTION__, pdir_base, pdir_size, - ioc->hint_shift_pdir, ioc->hint_mask_pdir); - - ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base); - WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); - - DBG_INIT(" base %p\n", pdir_base); - - /* build IMASK for IOC and Elroy */ - iova_space_mask = 0xffffffff; - iova_space_mask <<= (iov_order + PAGE_SHIFT); - -#ifdef CONFIG_IA64_HP_PROTO - /* - ** REVISIT - this is a kludge, but we won't be supporting anything but - ** zx1 2.0 or greater for real. When fw is in shape, ibase will - ** be preprogrammed w/ the IOVA hole base and imask will give us - ** the size. - */ - if ((sba_dev->hw_rev & 0xFF) < 0x20) { - DBG_INIT("%s() Found SBA rev < 2.0, setting IOVA base to 0. 
This device will not be supported in the future.\n", __FUNCTION__); - ioc->ibase = 0x0; - } else -#endif - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL; - - ioc->imask = iova_space_mask; /* save it */ - - DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", - __FUNCTION__, ioc->ibase, ioc->imask); - - /* - ** FIXME: Hint registers are programmed with default hint - ** values during boot, so hints should be sane even if we - ** can't reprogram them the way drivers want. - */ - - WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); - - /* - ** Setting the upper bits makes checking for bypass addresses - ** a little faster later on. - */ - ioc->imask |= 0xFFFFFFFF00000000UL; - - /* Set I/O PDIR Page size to system page size */ - switch (PAGE_SHIFT) { - case 12: /* 4K */ - tcnfg = 0; - break; - case 13: /* 8K */ - tcnfg = 1; - break; - case 14: /* 16K */ - tcnfg = 2; - break; - case 16: /* 64K */ - tcnfg = 3; - break; - } - WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG); - - /* - ** Program the IOC's ibase and enable IOVA translation - ** Bit zero == enable bit. - */ - WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); - - /* - ** Clear I/O TLB of any possible entries. - ** (Yes. This is a bit paranoid...but so what) - */ - WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); - - /* - ** If an AGP device is present, only use half of the IOV space - ** for PCI DMA. Unfortunately we can't know ahead of time - ** whether GART support will actually be used, for now we - ** can just key on an AGP device found in the system. - ** We program the next pdir index after we stop w/ a key for - ** the GART code to handshake on. 
- */ - if (SBA_GET_AGP(sba_dev)) { - DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n", __FUNCTION__); - ioc->pdir_size /= 2; - ((u64 *)pdir_base)[PDIR_INDEX(iova_space_size/2)] = 0x0000badbadc0ffeeULL; - } - - DBG_INIT("%s() DONE\n", __FUNCTION__); -} - - - -/************************************************************************** -** -** SBA initialization code (HW and SW) -** -** o identify SBA chip itself -** o FIXME: initialize DMA hints for reasonable defaults -** -**************************************************************************/ - -static void -sba_hw_init(struct sba_device *sba_dev) -{ - int i; - int num_ioc; - u64 dma_mask; - u32 func_id; - - /* - ** Identify the SBA so we can set the dma_mask. We can make a virtual - ** dma_mask of the memory subsystem such that devices not implmenting - ** a full 64bit mask might still be able to bypass efficiently. - */ - func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID); - - if (func_id == ZX1_FUNC_ID_VALUE) { - dma_mask = 0xFFFFFFFFFFUL; - } else { - dma_mask = 0xFFFFFFFFFFFFFFFFUL; - } - - DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask); - - /* - ** Leaving in the multiple ioc code from parisc for the future, - ** currently there are no muli-ioc mckinley sbas - */ - sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET; - num_ioc = 1; - - sba_dev->num_ioc = num_ioc; - for (i = 0; i < num_ioc; i++) { - sba_dev->ioc[i].dma_mask = dma_mask; - sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa; - sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i); - } -} - -static void -sba_common_init(struct sba_device *sba_dev) -{ - int i; - - /* add this one to the head of the list (order doesn't matter) - ** This will be useful for debugging - especially if we get coredumps - */ - sba_dev->next = sba_list; - sba_list = sba_dev; - sba_count++; - - for(i=0; i< sba_dev->num_ioc; i++) { - int res_size; - - /* resource map size dictated by pdir_size */ - res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries 
*/ - res_size >>= 3; /* convert bit count to byte count */ - DBG_INIT("%s() res_size 0x%x\n", - __FUNCTION__, res_size); - - sba_dev->ioc[i].res_size = res_size; - sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size)); - - if (NULL == sba_dev->ioc[i].res_map) - { - panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__ ); - } - - memset(sba_dev->ioc[i].res_map, 0, res_size); - /* next available IOVP - circular search */ - if ((sba_dev->hw_rev & 0xFF) >= 0x20) { - sba_dev->ioc[i].res_hint = (unsigned long *) - sba_dev->ioc[i].res_map; - } else { - u64 reserved_iov; - - /* Yet another 1.x hack */ - printk("zx1 1.x: Starting resource hint offset into IOV space to avoid initial zero value IOVA\n"); - sba_dev->ioc[i].res_hint = (unsigned long *) - &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); - - sba_dev->ioc[i].res_map[0] = 0x1; - sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL; - - for (reserved_iov = 0xA0000 ; reserved_iov < 0xC0000 ; reserved_iov += IOVP_SIZE) { - u64 *res_ptr = sba_dev->ioc[i].res_map; - int index = PDIR_INDEX(reserved_iov); - int res_word; - u64 mask; - - res_word = (int)(index / BITS_PER_LONG); - mask = 0x1UL << (index - (res_word * BITS_PER_LONG)); - res_ptr[res_word] |= mask; - sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (0x80000000000000FFULL | reserved_iov); - - } - } - -#ifdef ASSERT_PDIR_SANITY - /* Mark first bit busy - ie no IOVA 0 */ - sba_dev->ioc[i].res_map[0] = 0x1; - sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL; -#endif - - DBG_INIT("%s() %d res_map %x %p\n", __FUNCTION__, - i, res_size, (void *)sba_dev->ioc[i].res_map); - } - - sba_dev->sba_lock = SPIN_LOCK_UNLOCKED; -} - -#ifdef CONFIG_PROC_FS -static int sba_proc_info(char *buf, char **start, off_t offset, int len) -{ - struct sba_device *sba_dev = sba_list; - struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! 
*/ - int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ - unsigned long i = 0, avg = 0, min, max; - - sprintf(buf, "%s rev %d.%d\n", - "Hewlett Packard zx1 SBA", - ((sba_dev->hw_rev >> 4) & 0xF), - (sba_dev->hw_rev & 0xF) - ); - sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", - buf, - (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ - total_pages); - - sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf, - total_pages - ioc->used_pages, ioc->used_pages, - (int) (ioc->used_pages * 100 / total_pages)); - - sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n", - buf, ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ - - min = max = ioc->avg_search[0]; - for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { - avg += ioc->avg_search[i]; - if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; - if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; - } - avg /= SBA_SEARCH_SAMPLE; - sprintf(buf, "%s Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", - buf, min, avg, max); - - sprintf(buf, "%spci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->msingle_calls, ioc->msingle_pages, - (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); -#ifdef ALLOW_IOV_BYPASS - sprintf(buf, "%spci_map_single(): %12ld bypasses\n", - buf, ioc->msingle_bypass); -#endif - - sprintf(buf, "%spci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->usingle_calls, ioc->usingle_pages, - (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls)); -#ifdef ALLOW_IOV_BYPASS - sprintf(buf, "%spci_unmap_single: %12ld bypasses\n", - buf, ioc->usingle_bypass); -#endif - - sprintf(buf, "%spci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", - buf, ioc->msg_calls, ioc->msg_pages, - (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); -#ifdef ALLOW_IOV_BYPASS - sprintf(buf, "%spci_map_sg() : %12ld bypasses\n", - buf, ioc->msg_bypass); -#endif - - sprintf(buf, "%spci_unmap_sg() : %12ld calls %12ld pages (avg 
%d/1000)\n", - buf, ioc->usg_calls, ioc->usg_pages, - (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); - - return strlen(buf); -} - -static int -sba_resource_map(char *buf, char **start, off_t offset, int len) -{ - struct ioc *ioc = sba_list->ioc; /* FIXME: Multi-IOC support! */ - unsigned int *res_ptr = (unsigned int *)ioc->res_map; - int i; - - buf[0] = '\0'; - for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) { - if ((i & 7) == 0) - strcat(buf,"\n "); - sprintf(buf, "%s %08x", buf, *res_ptr); - } - strcat(buf, "\n"); - - return strlen(buf); -} -#endif - -/* -** Determine if sba should claim this chip (return 0) or not (return 1). -** If so, initialize the chip and tell other partners in crime they -** have work to do. -*/ -void __init sba_init(void) -{ - struct sba_device *sba_dev; - u32 func_id, hw_rev; - u32 *func_offset = NULL; - int i, agp_found = 0; - static char sba_rev[6]; - struct pci_dev *device = NULL; - u64 hpa = 0; - - if (!(device = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_SBA, NULL))) - return; - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - if (pci_resource_flags(device, i) == IORESOURCE_MEM) { - hpa = ioremap(pci_resource_start(device, i), - pci_resource_len(device, i)); - break; - } - } - - func_id = READ_REG(hpa + SBA_FUNC_ID); - - if (func_id == ZX1_FUNC_ID_VALUE) { - (void)strcpy(sba_rev, "zx1"); - func_offset = zx1_func_offsets; - } else { - return; - } - - /* Read HW Rev First */ - hw_rev = READ_REG(hpa + SBA_FCLASS) & 0xFFUL; - - /* - * Not all revision registers of the chipset are updated on every - * turn. 
Must scan through all functions looking for the highest rev - */ - if (func_offset) { - for (i = 0 ; func_offset[i] != -1 ; i++) { - u32 func_rev; - - func_rev = READ_REG(hpa + SBA_FCLASS + func_offset[i]) & 0xFFUL; - DBG_INIT("%s() func offset: 0x%x rev: 0x%x\n", - __FUNCTION__, func_offset[i], func_rev); - if (func_rev > hw_rev) - hw_rev = func_rev; - } - } - - printk(KERN_INFO "%s found %s %d.%d at %s, HPA 0x%lx\n", DRIVER_NAME, - sba_rev, ((hw_rev >> 4) & 0xF), (hw_rev & 0xF), - device->slot_name, hpa); - - if ((hw_rev & 0xFF) < 0x20) { - printk(KERN_INFO "%s WARNING rev 2.0 or greater will be required for IO MMU support in the future\n", DRIVER_NAME); -#ifndef CONFIG_IA64_HP_PROTO - panic("%s: CONFIG_IA64_HP_PROTO MUST be enabled to support SBA rev less than 2.0", DRIVER_NAME); -#endif - } - - sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL); - if (NULL == sba_dev) { - printk(KERN_ERR DRIVER_NAME " - couldn't alloc sba_device\n"); - return; - } - - memset(sba_dev, 0, sizeof(struct sba_device)); - - for(i=0; iioc[i].res_lock)); - - sba_dev->hw_rev = hw_rev; - sba_dev->sba_hpa = hpa; + sac_only_dev.dma_mask = 0xFFFFFFFFUL; /* * We need to check for an AGP device, if we find one, then only diff -Nru a/arch/ia64/hp/zx1/hpzx1_misc.c b/arch/ia64/hp/zx1/hpzx1_misc.c --- a/arch/ia64/hp/zx1/hpzx1_misc.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/hp/zx1/hpzx1_misc.c Fri Apr 26 00:01:27 2002 @@ -42,7 +42,7 @@ static struct fake_pci_dev *fake_pci_head, **fake_pci_tail = &fake_pci_head; -static struct pci_ops orig_pci_ops; +static struct pci_ops *orig_pci_ops; static inline struct fake_pci_dev * fake_pci_find_slot(unsigned char bus, unsigned int devfn) @@ -77,7 +77,7 @@ { \ struct fake_pci_dev *fake_dev; \ if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \ - return orig_pci_ops.name(dev, where, value); \ + return orig_pci_ops->name(dev, where, value); \ \ switch (where) { \ case PCI_COMMAND: \ @@ -105,7 +105,7 @@ { \ struct fake_pci_dev *fake_dev; 
\ if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \ - return orig_pci_ops.name(dev, where, value); \ + return orig_pci_ops->name(dev, where, value); \ \ switch (where) { \ case PCI_BASE_ADDRESS_0: \ @@ -295,7 +295,7 @@ if (status != AE_OK) return status; - status = acpi_cf_evaluate_method(obj, METHOD_NAME__BBN, &busnum); + status = acpi_evaluate_integer(obj, METHOD_NAME__BBN, NULL, &busnum); if (ACPI_FAILURE(status)) { printk(KERN_ERR PFX "evaluate _BBN fail=0x%x\n", status); busnum = 0; // no _BBN; stick it on bus 0 @@ -313,7 +313,7 @@ static void hpzx1_acpi_dev_init(void) { - extern struct pci_ops pci_conf; + extern struct pci_ops *pci_root_ops; /* * Make fake PCI devices for the following hardware in the @@ -383,8 +383,8 @@ /* * Replace PCI ops, but only if we made fake devices. */ - orig_pci_ops = pci_conf; - pci_conf = hp_pci_conf; + orig_pci_ops = pci_root_ops; + pci_root_ops = &hp_pci_conf; } extern void sba_init(void); diff -Nru a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c --- a/arch/ia64/kernel/acpi.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/kernel/acpi.c Fri Apr 26 00:01:27 2002 @@ -56,18 +56,43 @@ void (*pm_idle) (void); void (*pm_power_off) (void); - -/* - * TBD: Should go away once we have an ACPI parser. 
- */ const char * acpi_get_sysname (void) { #ifdef CONFIG_IA64_GENERIC - return "hpsim"; + unsigned long rsdp_phys = 0; + struct acpi20_table_rsdp *rsdp; + struct acpi_table_xsdt *xsdt; + struct acpi_table_header *hdr; + + if ((0 != acpi_find_rsdp(&rsdp_phys)) || !rsdp_phys) { + printk("ACPI 2.0 RSDP not found, default to \"dig\"\n"); + return "dig"; + } + + rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys); + if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { + printk("ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); + return "dig"; + } + + xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address); + hdr = &xsdt->header; + if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { + printk("ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); + return "dig"; + } + + if (!strcmp(hdr->oem_id, "HP")) { + return "hpzx1"; + } + + return "dig"; #else # if defined (CONFIG_IA64_HP_SIM) return "hpsim"; +# elif defined (CONFIG_IA64_HP_ZX1) + return "hpzx1"; # elif defined (CONFIG_IA64_SGI_SN1) return "sn1"; # elif defined (CONFIG_IA64_SGI_SN2) @@ -79,6 +104,69 @@ # endif #endif } + +#ifdef CONFIG_ACPI + +/** + * acpi_get_crs - Return the current resource settings for a device + * obj: A handle for this device + * buf: A buffer to be populated by this call. + * + * Pass a valid handle, typically obtained by walking the namespace and a + * pointer to an allocated buffer, and this function will fill in the buffer + * with a list of acpi_resource structures. 
+ */ +acpi_status +acpi_get_crs (acpi_handle obj, acpi_buffer *buf) +{ + acpi_status result; + buf->length = 0; + buf->pointer = NULL; + + result = acpi_get_current_resources(obj, buf); + if (result != AE_BUFFER_OVERFLOW) + return result; + buf->pointer = kmalloc(buf->length, GFP_KERNEL); + if (!buf->pointer) + return -ENOMEM; + + result = acpi_get_current_resources(obj, buf); + + return result; +} + +acpi_resource * +acpi_get_crs_next (acpi_buffer *buf, int *offset) +{ + acpi_resource *res; + + if (*offset >= buf->length) + return NULL; + + res = buf->pointer + *offset; + *offset += res->length; + return res; +} + +acpi_resource_data * +acpi_get_crs_type (acpi_buffer *buf, int *offset, int type) +{ + for (;;) { + acpi_resource *res = acpi_get_crs_next(buf, offset); + if (!res) + return NULL; + if (res->id == type) + return &res->data; + } +} + +void +acpi_dispose_crs (acpi_buffer *buf) +{ + kfree(buf->pointer); +} + +#endif /* CONFIG_ACPI */ #ifdef CONFIG_ACPI_BOOT diff -Nru a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S --- a/arch/ia64/kernel/gate.S Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/kernel/gate.S Fri Apr 26 00:01:27 2002 @@ -29,7 +29,7 @@ # define UNAT_OFF IA64_SIGCONTEXT_AR_UNAT_OFFSET # define FPSR_OFF IA64_SIGCONTEXT_AR_FPSR_OFFSET # define PR_OFF IA64_SIGCONTEXT_PR_OFFSET -# define RP_OFF IA64_SIGCONTEXT_B0_OFFSET +# define RP_OFF IA64_SIGCONTEXT_IP_OFFSET # define SP_OFF IA64_SIGCONTEXT_R12_OFFSET # define RBS_BASE_OFF IA64_SIGCONTEXT_RBS_BASE_OFFSET # define LOADRS_OFF IA64_SIGCONTEXT_LOADRS_OFFSET diff -Nru a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c --- a/arch/ia64/kernel/ia64_ksyms.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/kernel/ia64_ksyms.c Fri Apr 26 00:01:27 2002 @@ -6,11 +6,7 @@ #include #include -#undef memset -extern void *memset (void *, int, size_t); EXPORT_SYMBOL_NOVERS(memset); /* gcc generates direct calls to memset()... 
*/ -EXPORT_SYMBOL_NOVERS(__memset_generic); -EXPORT_SYMBOL_NOVERS(__bzero); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL_NOVERS(memcpy); diff -Nru a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c --- a/arch/ia64/kernel/iosapic.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/kernel/iosapic.c Fri Apr 26 00:01:27 2002 @@ -23,6 +23,7 @@ * iosapic_set_affinity(), initializations for * /proc/irq/#/smp_affinity * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing. + * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq */ /* * Here is what the interrupt logic between a PCI device and the CPU looks like: @@ -70,7 +71,7 @@ #undef DEBUG_IRQ_ROUTING -#undef OVERRIDE_DEBUG +#undef OVERRIDE_DEBUG static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED; @@ -676,6 +677,11 @@ pci_irq.route[i].bus, pci_irq.route[i].pci_id>>16, pci_irq.route[i].pin, iosapic_irq[vector].base_irq + iosapic_irq[vector].pin, vector); #endif + + /* + * Forget not to program the IOSAPIC RTE per ACPI _PRT + */ + set_rte(vector, (ia64_get_lid() >> 16) & 0xffff); } } diff -Nru a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S --- a/arch/ia64/kernel/ivt.S Fri Apr 26 00:01:26 2002 +++ b/arch/ia64/kernel/ivt.S Fri Apr 26 00:01:26 2002 @@ -330,12 +330,15 @@ (p8) br.cond.dptk dtlb_fault #endif extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl + and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? - and r19=r19,r16 // clear ed, reserved bits, and PTE control bits - shr.u r18=r16,57 // move address bit 61 to bit 4 + shr.u r18=r16,57 // move address bit 61 to bit 4 + and r19=r19,r16 // clear ed, reserved bits, and PTE control bits + tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? 
;; andcm r18=0x10,r18 // bit 4=~address-bit(61) cmp.ne p8,p0=r0,r23 +(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field (p8) br.cond.spnt page_fault dep r21=-1,r21,IA64_PSR_ED_BIT,1 diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c --- a/arch/ia64/kernel/setup.c Fri Apr 26 00:01:26 2002 +++ b/arch/ia64/kernel/setup.c Fri Apr 26 00:01:26 2002 @@ -395,7 +395,7 @@ switch (c->family) { case 0x07: memcpy(family, "Itanium", 8); break; - case 0x1f: memcpy(family, "McKinley", 9); break; + case 0x1f: memcpy(family, "Itanium 2", 9); break; default: sprintf(family, "%u", c->family); break; } diff -Nru a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c --- a/arch/ia64/kernel/signal.c Fri Apr 26 00:01:26 2002 +++ b/arch/ia64/kernel/signal.c Fri Apr 26 00:01:26 2002 @@ -559,7 +559,7 @@ continue; switch (signr) { - case SIGCONT: case SIGCHLD: case SIGWINCH: + case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG: continue; case SIGTSTP: case SIGTTIN: case SIGTTOU: diff -Nru a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c --- a/arch/ia64/kernel/traps.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/kernel/traps.c Fri Apr 26 00:01:27 2002 @@ -186,6 +186,10 @@ sig = SIGSEGV; code = __SEGV_PSTKOVF; break; + case 0x3f000 ... 0x3ffff: /* bundle-update in progress */ + sig = SIGILL; code = __ILL_BNDMOD; + break; + default: if (break_num < 0x40000 || break_num > 0x100000) die_if_kernel("Bad break", regs, break_num); @@ -443,30 +447,14 @@ "Unknown fault 13", "Unknown fault 14", "Unknown fault 15" }; -#if 0 - /* this is for minimal trust debugging; yeah this kind of stuff is useful at times... 
*/ - - if (vector != 25) { - static unsigned long last_time; - static char count; - unsigned long n = vector; - char buf[32], *cp; - - if (jiffies - last_time > 5*HZ) - count = 0; - - if (count++ < 5) { - last_time = jiffies; - cp = buf + sizeof(buf); - *--cp = '\0'; - while (n) { - *--cp = "0123456789abcdef"[n & 0xf]; - n >>= 4; - } - printk("<0x%s>", cp); - } + if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { + /* + * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel + * the lfetch. + */ + ia64_psr(regs)->ed = 1; + return; } -#endif switch (vector) { case 24: /* General Exception */ diff -Nru a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile --- a/arch/ia64/lib/Makefile Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/lib/Makefile Fri Apr 26 00:01:27 2002 @@ -13,7 +13,7 @@ __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ checksum.o clear_page.o csum_partial_copy.o copy_page.o \ copy_user.o clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ - flush.o io.o do_csum.o \ + flush.o io.o ip_fast_csum.o do_csum.o \ memcpy.o memset.o strlen.o swiotlb.o obj-$(CONFIG_ITANIUM) += copy_page.o diff -Nru a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c --- a/arch/ia64/lib/checksum.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/lib/checksum.c Fri Apr 26 00:01:27 2002 @@ -15,7 +15,7 @@ #include static inline unsigned short -from64to16(unsigned long x) +from64to16 (unsigned long x) { /* add up 32-bit words for 33 bits */ x = (x & 0xffffffff) + (x >> 32); @@ -32,22 +32,17 @@ * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented. 
*/ -unsigned short int csum_tcpudp_magic(unsigned long saddr, - unsigned long daddr, - unsigned short len, - unsigned short proto, - unsigned int sum) +unsigned short int +csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned short proto, unsigned int sum) { - return ~from64to16(saddr + daddr + sum + - ((unsigned long) ntohs(len) << 16) + - ((unsigned long) proto << 8)); + return ~from64to16(saddr + daddr + sum + ((unsigned long) ntohs(len) << 16) + + ((unsigned long) proto << 8)); } -unsigned int csum_tcpudp_nofold(unsigned long saddr, - unsigned long daddr, - unsigned short len, - unsigned short proto, - unsigned int sum) +unsigned int +csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len, + unsigned short proto, unsigned int sum) { unsigned long result; @@ -66,15 +61,6 @@ extern unsigned long do_csum (const unsigned char *, long); /* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. 
- */ -unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) -{ - return ~do_csum(iph, ihl*4); -} - -/* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * @@ -86,7 +72,8 @@ * * it's best to have buff aligned on a 32-bit boundary */ -unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) +unsigned int +csum_partial (const unsigned char * buff, int len, unsigned int sum) { unsigned long result = do_csum(buff, len); @@ -102,7 +89,8 @@ * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ -unsigned short ip_compute_csum(unsigned char * buff, int len) +unsigned short +ip_compute_csum (unsigned char * buff, int len) { return ~do_csum(buff,len); } diff -Nru a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S --- a/arch/ia64/lib/copy_page.S Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/lib/copy_page.S Fri Apr 26 00:01:27 2002 @@ -30,6 +30,7 @@ #define tgt2 r23 #define srcf r24 #define tgtf r25 +#define tgt_last r26 #define Nrot ((8*PIPE_DEPTH+7)&~7) @@ -55,18 +56,21 @@ mov src1=in1 adds src2=8,in1 + mov tgt_last = PAGE_SIZE ;; adds tgt2=8,in0 add srcf=512,in1 mov ar.lc=lcount mov tgt1=in0 add tgtf=512,in0 + add tgt_last = tgt_last, in0 ;; 1: (p[0]) ld8 t1[0]=[src1],16 (EPI) st8 [tgt1]=t1[PIPE_DEPTH-1],16 (p[0]) ld8 t2[0]=[src2],16 (EPI) st8 [tgt2]=t2[PIPE_DEPTH-1],16 + cmp.ltu p6,p0 = tgtf, tgt_last ;; (p[0]) ld8 t3[0]=[src1],16 (EPI) st8 [tgt1]=t3[PIPE_DEPTH-1],16 @@ -83,8 +87,8 @@ (p[0]) ld8 t8[0]=[src2],16 (EPI) st8 [tgt2]=t8[PIPE_DEPTH-1],16 - lfetch [srcf], 64 - lfetch [tgtf], 64 +(p6) lfetch [srcf], 64 +(p6) lfetch [tgtf], 64 br.ctop.sptk.few 1b ;; mov pr=saved_pr,0xffffffffffff0000 // restore predicates diff -Nru a/arch/ia64/lib/do_csum.S b/arch/ia64/lib/do_csum.S --- a/arch/ia64/lib/do_csum.S Fri Apr 26 00:01:26 2002 +++ b/arch/ia64/lib/do_csum.S Fri Apr 26 00:01:26 2002 @@ -11,6 +11,9 @@ * Copyright (C) 1999, 2001-2002 Hewlett-Packard Co * Stephane Eranian * 
+ * 02/04/22 Ken Chen + * Data locality study on the checksum buffer. + * More optimization cleanup - remove excessive stop bits. * 02/04/08 David Mosberger * More cleanup and tuning. * 01/04/18 Jun Nakajima * More cleanup and tuning. @@ -80,6 +83,12 @@ // type of packet or alignment we get. Like the ip_fast_csum() routine // where we know we have at least 20bytes worth of data to checksum. // - Do a better job of handling small packets. +// - Note on prefetching: it was found that under various load, i.e. ftp read/write, +// nfs read/write, the L1 cache hit rate is at 60% and L2 cache hit rate is at 99.8% +// on the data that buffer points to (partly because the checksum is often preceded by +// a copy_from_user()). This finding indicates that lfetch will not be beneficial since +// the data is already in the cache. +// #define saved_pfs r11 #define hmask r16 @@ -117,7 +126,7 @@ GLOBAL_ENTRY(do_csum) .prologue .save ar.pfs, saved_pfs - alloc saved_pfs=ar.pfs,2,16,1,16 + alloc saved_pfs=ar.pfs,2,16,0,16 .rotr word1[4], word2[4],result1[LOAD_LATENCY+2],result2[LOAD_LATENCY+2] .rotp p[PIPE_DEPTH], pC1[2], pC2[2] mov ret0=r0 // in case we have zero length @@ -197,22 +206,21 @@ // Calculate the checksum loading two 8-byte words per loop. // .do_csum16: - mov saved_lc=ar.lc shr.u count=count,1 // we do 16 bytes per loop + brp.loop.imp 1f,2f ;; cmp.eq p9,p10=r0,count // if (count == 0) adds count=-1,count - brp.loop.imp 1f,2f - ;; mov ar.ec=PIPE_DEPTH - mov ar.lc=count // set lc - // result1[0] must be initialized in advance. - mov result2[0]=r0 - mov pr.rot=1<<16 mov carry1=r0 mov carry2=r0 add first2=8,first1 + ;; + mov ar.lc=count // set lc + mov pr.rot=1<<16 + // result1[0] must be initialized in advance. 
+ mov result2[0]=r0 (p9) br.cond.sptk .do_csum_exit ;; .align 32 @@ -223,7 +231,7 @@ (pC2[1])adds carry2=1,carry2 (ELD) add result1[LOAD_LATENCY-1]=result1[LOAD_LATENCY],word1[LOAD_LATENCY] (ELD) add result2[LOAD_LATENCY-1]=result2[LOAD_LATENCY],word2[LOAD_LATENCY] -[2:] +2: (p[0]) ld8 word1[0]=[first1],16 (p[0]) ld8 word2[0]=[first2],16 br.ctop.sptk 1b diff -Nru a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/arch/ia64/lib/ip_fast_csum.S Fri Apr 26 00:01:27 2002 @@ -0,0 +1,90 @@ +/* + * Optimized version of the ip_fast_csum() function + * Used for calculating IP header checksum + * + * Return: 16bit checksum, complemented + * + * Inputs: + * in0: address of buffer to checksum (char *) + * in1: length of the buffer (int) + * + * Copyright (C) 2002 Intel Corp. + * Copyright (C) 2002 Ken Chen + */ + +#include + +/* + * Since we know that most likely this function is called with buf aligned + * on 4-byte boundary and 20 bytes in length, we can execute rather quickly + * versus calling generic version of do_csum, which has lots of overhead in + * handling various alignments and sizes. However, due to lack of constraints + * put on the function input argument, cases with alignment not on 4-byte or + * size not equal to 20 bytes will be handled by the generic do_csum function. + */ + +#define in0 r32 +#define in1 r33 +#define ret0 r8 + +GLOBAL_ENTRY(ip_fast_csum) + .prologue + .body + cmp.ne p6,p7=5,in1 // size other than 20 byte? + and r14=3,in0 // is it aligned on 4-byte? 
+ add r15=4,in0 // second source pointer + ;; + cmp.ne.or.andcm p6,p7=r14,r0 + ;; +(p7) ld4 r20=[in0],8 +(p7) ld4 r21=[r15],8 +(p6) br.spnt .generic + ;; + ld4 r22=[in0],8 + ld4 r23=[r15],8 + ;; + ld4 r24=[in0] + add r20=r20,r21 + add r22=r22,r23 + ;; + add r20=r20,r22 + ;; + add r20=r20,r24 + ;; + shr.u ret0=r20,16 // now need to add the carry + zxt2 r20=r20 + ;; + add r20=ret0,r20 + ;; + shr.u ret0=r20,16 // add carry again + zxt2 r20=r20 + ;; + add r20=ret0,r20 + ;; + shr.u ret0=r20,16 + zxt2 r20=r20 + ;; + add r20=ret0,r20 + ;; + andcm ret0=-1,r20 + .restore sp // reset frame state + br.ret.sptk.many b0 + ;; + +.generic: + .prologue + .save ar.pfs, r35 + alloc r35=ar.pfs,2,2,2,0 + .save rp, r34 + mov r34=b0 + .body + dep.z out1=in1,2,30 + mov out0=in0 + ;; + br.call.sptk.many b0=do_csum + ;; + andcm ret0=-1,ret0 + mov ar.pfs=r35 + mov b0=r34 + br.ret.sptk.many b0 +END(ip_fast_csum) diff -Nru a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S --- a/arch/ia64/lib/memset.S Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/lib/memset.S Fri Apr 26 00:01:27 2002 @@ -1,123 +1,362 @@ -/* - * - * Optimized version of the standard memset() function - * - * Return: none - * - * Inputs: - * in0: address of buffer - * in1: byte value to use for storing - * in2: length of the buffer - * - * Copyright (C) 1999, 2001, 2002 Hewlett-Packard Co - * Stephane Eranian - */ +/* Optimized version of the standard memset() function. + + Copyright (c) 2002 Hewlett-Packard Co/CERN + Sverre Jarp + + Return: dest + + Inputs: + in0: dest + in1: value + in2: count + + The algorithm is fairly straightforward: set byte by byte until we + we get to a 16B-aligned address, then loop on 128 B chunks using an + early store as prefetching, then loop on 32B chucks, then clear remaining + words, finally clear remaining bytes. + Since a stf.spill f0 can store 16B in one go, we use this instruction + to get peak speed when value = 0. 
*/ #include +#undef ret + +#define dest in0 +#define value in1 +#define cnt in2 -// arguments -// -#define buf r32 -#define val r33 -#define len r34 - -// -// local registers -// -#define saved_pfs r14 -#define cnt r18 -#define buf2 r19 -#define saved_lc r20 -#define tmp r21 +#define tmp r31 +#define save_lc r30 +#define ptr0 r29 +#define ptr1 r28 +#define ptr2 r27 +#define ptr3 r26 +#define ptr9 r24 +#define loopcnt r23 +#define linecnt r22 +#define bytecnt r21 -GLOBAL_ENTRY(__bzero) +#define fvalue f6 + +// This routine uses only scratch predicate registers (p6 - p15) +#define p_scr p6 // default register for same-cycle branches +#define p_nz p7 +#define p_zr p8 +#define p_unalgn p9 +#define p_y p11 +#define p_n p12 +#define p_yy p13 +#define p_nn p14 + +#define MIN1 15 +#define MIN1P1HALF 8 +#define LINE_SIZE 128 +#define LSIZE_SH 7 // shift amount +#define PREF_AHEAD 8 + +GLOBAL_ENTRY(memset) +{ .mmi .prologue - .save ar.pfs, saved_pfs - alloc saved_pfs=ar.pfs,0,0,3,0 - mov out2=out1 - mov out1=0 - /* FALL THROUGH (explicit NOPs so that next alloc is preceded by stop bit!) */ + alloc tmp = ar.pfs, 3, 0, 0, 0 + .body + lfetch.nt1 [dest] // + .save ar.lc, save_lc + mov.i save_lc = ar.lc +} { .mmi + mov ret0 = dest // return value + cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero + cmp.eq p_scr, p0 = cnt, r0 +;; } +{ .mmi + and ptr2 = -(MIN1+1), dest // aligned address + and tmp = MIN1, dest // prepare to check for correct alignment + tbit.nz p_y, p_n = dest, 0 // Do we have an odd address? (M_B_U) +} { .mib + mov ptr1 = dest + mux1 value = value, @brcst // create 8 identical bytes in word +(p_scr) br.ret.dpnt.many rp // return immediately if count = 0 +;; } +{ .mib + cmp.ne p_unalgn, p0 = tmp, r0 // +} { .mib + sub bytecnt = (MIN1+1), tmp // NB: # of bytes to move is 1 higher than loopcnt + cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task? 
+(p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U) +;; } +{ .mmi +(p_unalgn) add ptr1 = (MIN1+1), ptr2 // after alignment +(p_unalgn) add ptr2 = MIN1P1HALF, ptr2 // after alignment +(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3 // should we do a st8 ? +;; } +{ .mib +(p_y) add cnt = -8, cnt // +(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2 // should we do a st4 ? +} { .mib +(p_y) st8 [ptr2] = value,-4 // +(p_n) add ptr2 = 4, ptr2 // +;; } +{ .mib +(p_yy) add cnt = -4, cnt // +(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1 // should we do a st2 ? +} { .mib +(p_yy) st4 [ptr2] = value,-2 // +(p_nn) add ptr2 = 2, ptr2 // +;; } +{ .mmi + mov tmp = LINE_SIZE+1 // for compare +(p_y) add cnt = -2, cnt // +(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0 // should we do a st1 ? +} { .mmi + setf.sig fvalue=value // transfer value to FLP side +(p_y) st2 [ptr2] = value,-1 // +(p_n) add ptr2 = 1, ptr2 // +;; } + +{ .mmi +(p_yy) st1 [ptr2] = value // + cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task? 
+} { .mbb +(p_yy) add cnt = -1, cnt // +(p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few +;; } + +{ .mib nop.m 0 - nop.f 0 - nop.i 0 - ;; -END(__bzero) -GLOBAL_ENTRY(__memset_generic) - .prologue - .save ar.pfs, saved_pfs - alloc saved_pfs=ar.pfs,3,0,0,0 // cnt is sink here - cmp.eq p8,p0=r0,len // check for zero length - .save ar.lc, saved_lc - mov saved_lc=ar.lc // preserve ar.lc (slow) - ;; + shr.u linecnt = cnt, LSIZE_SH +(p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill +;; } - .body + .align 32 // -------------------------- // L1A: store ahead into cache lines; fill later +{ .mmi + and tmp = -(LINE_SIZE), cnt // compute end of range + mov ptr9 = ptr1 // used for prefetching + and cnt = (LINE_SIZE-1), cnt // remainder +} { .mmi + mov loopcnt = PREF_AHEAD-1 // default prefetch loop + cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value +;; } +{ .mmi +(p_scr) add loopcnt = -1, linecnt // + add ptr2 = 8, ptr1 // start of stores (beyond prefetch stores) + add ptr1 = tmp, ptr1 // first address beyond total range +;; } +{ .mmi + add tmp = -1, linecnt // next loop count + mov.i ar.lc = loopcnt // +;; } +.pref_l1a: +{ .mib + stf8 [ptr9] = fvalue, 128 // Do stores one cache line apart + nop.i 0 + br.cloop.dptk.few .pref_l1a +;; } +{ .mmi + add ptr0 = 16, ptr2 // Two stores in parallel + mov.i ar.lc = tmp // +;; } +.l1ax: + { .mmi + stf8 [ptr2] = fvalue, 8 + stf8 [ptr0] = fvalue, 8 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 24 + stf8 [ptr0] = fvalue, 24 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 8 + stf8 [ptr0] = fvalue, 8 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 24 + stf8 [ptr0] = fvalue, 24 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 8 + stf8 [ptr0] = fvalue, 8 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 24 + stf8 [ptr0] = fvalue, 24 + ;; } + { .mmi + stf8 [ptr2] = fvalue, 8 + stf8 [ptr0] = fvalue, 32 + cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? 
+ ;; } +{ .mmb + stf8 [ptr2] = fvalue, 24 +(p_scr) stf8 [ptr9] = fvalue, 128 + br.cloop.dptk.few .l1ax +;; } +{ .mbb + cmp.le p_scr, p0 = 8, cnt // just a few bytes left ? +(p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2 + br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3 +;; } + + .align 32 +.l1b: // ------------------------------------ // L1B: store ahead into cache lines; fill later +{ .mmi + and tmp = -(LINE_SIZE), cnt // compute end of range + mov ptr9 = ptr1 // used for prefetching + and cnt = (LINE_SIZE-1), cnt // remainder +} { .mmi + mov loopcnt = PREF_AHEAD-1 // default prefetch loop + cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value +;; } +{ .mmi +(p_scr) add loopcnt = -1, linecnt + add ptr2 = 16, ptr1 // start of stores (beyond prefetch stores) + add ptr1 = tmp, ptr1 // first address beyond total range +;; } +{ .mmi + add tmp = -1, linecnt // next loop count + mov.i ar.lc = loopcnt +;; } +.pref_l1b: +{ .mib + stf.spill [ptr9] = f0, 128 // Do stores one cache line apart + nop.i 0 + br.cloop.dptk.few .pref_l1b +;; } +{ .mmi + add ptr0 = 16, ptr2 // Two stores in parallel + mov.i ar.lc = tmp +;; } +.l1bx: + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 32 + ;; } + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 32 + ;; } + { .mmi + stf.spill [ptr2] = f0, 32 + stf.spill [ptr0] = f0, 64 + cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching? + ;; } +{ .mmb + stf.spill [ptr2] = f0, 32 +(p_scr) stf.spill [ptr9] = f0, 128 + br.cloop.dptk.few .l1bx +;; } +{ .mib + cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? 
+(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // +;; } - adds tmp=-1,len // br.ctop is repeat/until - tbit.nz p6,p0=buf,0 // odd alignment -(p8) br.ret.spnt.many rp - - cmp.lt p7,p0=16,len // if len > 16 then long memset - mux1 val=val,@brcst // prepare value -(p7) br.cond.dptk .long_memset - ;; - mov ar.lc=tmp // initialize lc for small count - ;; // avoid RAW and WAW on ar.lc -1: // worst case 15 cyles, avg 8 cycles - st1 [buf]=val,1 - br.cloop.dptk.few 1b - ;; // avoid RAW on ar.lc - mov ar.lc=saved_lc - mov ar.pfs=saved_pfs - br.ret.sptk.many rp // end of short memset - - // at this point we know we have more than 16 bytes to copy - // so we focus on alignment -.long_memset: -(p6) st1 [buf]=val,1 // 1-byte aligned -(p6) adds len=-1,len;; // sync because buf is modified - tbit.nz p6,p0=buf,1 - ;; -(p6) st2 [buf]=val,2 // 2-byte aligned -(p6) adds len=-2,len;; - tbit.nz p6,p0=buf,2 - ;; -(p6) st4 [buf]=val,4 // 4-byte aligned -(p6) adds len=-4,len;; - tbit.nz p6,p0=buf,3 - ;; -(p6) st8 [buf]=val,8 // 8-byte aligned -(p6) adds len=-8,len;; - shr.u cnt=len,4 // number of 128-bit (2x64bit) words - ;; - cmp.eq p6,p0=r0,cnt - adds tmp=-1,cnt -(p6) br.cond.dpnt .dotail // we have less than 16 bytes left - ;; - adds buf2=8,buf // setup second base pointer - mov ar.lc=tmp - ;; -2: // 16bytes/iteration - st8 [buf]=val,16 - st8 [buf2]=val,16 - br.cloop.dptk.few 2b - ;; -.dotail: // tail correction based on len only - tbit.nz p6,p0=len,3 - ;; -(p6) st8 [buf]=val,8 // at least 8 bytes - tbit.nz p6,p0=len,2 - ;; -(p6) st4 [buf]=val,4 // at least 4 bytes - tbit.nz p6,p0=len,1 - ;; -(p6) st2 [buf]=val,2 // at least 2 bytes - tbit.nz p6,p0=len,0 - mov ar.lc=saved_lc - ;; -(p6) st1 [buf]=val // only 1 byte left +.fraction_of_line: +{ .mib + add ptr2 = 16, ptr1 + shr.u loopcnt = cnt, 5 // loopcnt = cnt / 32 +;; } +{ .mib + cmp.eq p_scr, p0 = loopcnt, r0 + add loopcnt = -1, loopcnt +(p_scr) br.cond.dpnt.many .store_words +;; } +{ .mib + and cnt = 0x1f, cnt // compute the 
remaining cnt + mov.i ar.lc = loopcnt +;; } + .align 32 +.l2: // ------------------------------------ // L2A: store 32B in 2 cycles +{ .mmb + stf8 [ptr1] = fvalue, 8 + stf8 [ptr2] = fvalue, 8 +;; } { .mmb + stf8 [ptr1] = fvalue, 24 + stf8 [ptr2] = fvalue, 24 + br.cloop.dptk.many .l2 +;; } +.store_words: +{ .mib + cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ? +(p_scr) br.cond.dpnt.many .move_bytes_from_alignment // Branch +;; } + +{ .mmi + stf8 [ptr1] = fvalue, 8 // store + cmp.le p_y, p_n = 16, cnt + add cnt = -8, cnt // subtract +;; } +{ .mmi +(p_y) stf8 [ptr1] = fvalue, 8 // store +(p_y) cmp.le.unc p_yy, p_nn = 16, cnt +(p_y) add cnt = -8, cnt // subtract +;; } +{ .mmi // store +(p_yy) stf8 [ptr1] = fvalue, 8 +(p_yy) add cnt = -8, cnt // subtract +;; } + +.move_bytes_from_alignment: +{ .mib + cmp.eq p_scr, p0 = cnt, r0 + tbit.nz.unc p_y, p0 = cnt, 2 // should we terminate with a st4 ? +(p_scr) br.cond.dpnt.few .restore_and_exit +;; } +{ .mib +(p_y) st4 [ptr1] = value,4 + tbit.nz.unc p_yy, p0 = cnt, 1 // should we terminate with a st2 ? +;; } +{ .mib +(p_yy) st2 [ptr1] = value,2 + tbit.nz.unc p_y, p0 = cnt, 0 // should we terminate with a st1 ? 
+;; } + +{ .mib +(p_y) st1 [ptr1] = value +;; } +.restore_and_exit: +{ .mib + nop.m 0 + mov.i ar.lc = save_lc br.ret.sptk.many rp -END(__memset_generic) +;; } - .global memset -memset = __memset_generic // alias needed for gcc +.move_bytes_unaligned: +{ .mmi + .pred.rel "mutex",p_y, p_n + .pred.rel "mutex",p_yy, p_nn +(p_n) cmp.le p_yy, p_nn = 4, cnt +(p_y) cmp.le p_yy, p_nn = 5, cnt +(p_n) add ptr2 = 2, ptr1 +} { .mmi +(p_y) add ptr2 = 3, ptr1 +(p_y) st1 [ptr1] = value, 1 // fill 1 (odd-aligned) byte [15, 14 (or less) left] +(p_y) add cnt = -1, cnt +;; } +{ .mmi +(p_yy) cmp.le.unc p_y, p0 = 8, cnt + add ptr3 = ptr1, cnt // prepare last store + mov.i ar.lc = save_lc +} { .mmi +(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes +(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [11, 10 (o less) left] +(p_yy) add cnt = -4, cnt +;; } +{ .mmi +(p_y) cmp.le.unc p_yy, p0 = 8, cnt + add ptr3 = -1, ptr3 // last store + tbit.nz p_scr, p0 = cnt, 1 // will there be a st2 at the end ? +} { .mmi +(p_y) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes +(p_y) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [7, 6 (or less) left] +(p_y) add cnt = -4, cnt +;; } +{ .mmi +(p_yy) st2 [ptr1] = value, 4 // fill 2 (aligned) bytes +(p_yy) st2 [ptr2] = value, 4 // fill 2 (aligned) bytes [3, 2 (or less) left] + tbit.nz p_y, p0 = cnt, 0 // will there be a st1 at the end ? 
+} { .mmi +(p_yy) add cnt = -4, cnt +;; } +{ .mmb +(p_scr) st2 [ptr1] = value // fill 2 (aligned) bytes +(p_y) st1 [ptr3] = value // fill last byte (using ptr3) + br.ret.sptk.many rp +} +END(memset) diff -Nru a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c --- a/arch/ia64/mm/fault.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/mm/fault.c Fri Apr 26 00:01:27 2002 @@ -137,10 +137,13 @@ bad_area: up_read(&mm->mmap_sem); - if (isr & IA64_ISR_SP) { + if ((isr & IA64_ISR_SP) + || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) + { /* - * This fault was due to a speculative load set the "ed" bit in the psr to - * ensure forward progress (target register will get a NaT). + * This fault was due to a speculative load or lfetch.fault, set the "ed" + * bit in the psr to ensure forward progress. (Target register will get a + * NaT for ld.s, lfetch will be canceled.) */ ia64_psr(regs)->ed = 1; return; diff -Nru a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c --- a/arch/ia64/tools/print_offsets.c Fri Apr 26 00:01:27 2002 +++ b/arch/ia64/tools/print_offsets.c Fri Apr 26 00:01:27 2002 @@ -143,6 +143,7 @@ { "IA64_SWITCH_STACK_AR_RNAT_OFFSET", offsetof (struct switch_stack, ar_rnat) }, { "IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET", offsetof (struct switch_stack, ar_bspstore) }, { "IA64_SWITCH_STACK_PR_OFFSET", offsetof (struct switch_stack, pr) }, + { "IA64_SIGCONTEXT_IP_OFFSET", offsetof (struct sigcontext, sc_ip) }, { "IA64_SIGCONTEXT_AR_BSP_OFFSET", offsetof (struct sigcontext, sc_ar_bsp) }, { "IA64_SIGCONTEXT_AR_FPSR_OFFSET", offsetof (struct sigcontext, sc_ar_fpsr) }, { "IA64_SIGCONTEXT_AR_RNAT_OFFSET", offsetof (struct sigcontext, sc_ar_rnat) }, diff -Nru a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c --- a/arch/parisc/kernel/traps.c Fri Apr 26 00:01:27 2002 +++ b/arch/parisc/kernel/traps.c Fri Apr 26 00:01:27 2002 @@ -43,7 +43,6 @@ static inline void console_verbose(void) { - extern int console_loglevel; console_loglevel = 15; } 
diff -Nru a/drivers/acpi/acpi_bus.c b/drivers/acpi/acpi_bus.c --- a/drivers/acpi/acpi_bus.c Fri Apr 26 00:01:27 2002 +++ b/drivers/acpi/acpi_bus.c Fri Apr 26 00:01:27 2002 @@ -1053,7 +1053,7 @@ if (!cid[0]) return -ENOENT; - if (0 != strstr(cid, device->pnp.hardware_id)) + if (0 != strstr(driver->ids, cid)) return 0; } diff -Nru a/drivers/acpi/acpi_osl.c b/drivers/acpi/acpi_osl.c --- a/drivers/acpi/acpi_osl.c Fri Apr 26 00:01:27 2002 +++ b/drivers/acpi/acpi_osl.c Fri Apr 26 00:01:27 2002 @@ -158,9 +158,9 @@ #else /*CONFIG_ACPI_EFI*/ addr->pointer_type = ACPI_PHYSICAL_POINTER; if (efi.acpi20) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) efi.acpi20; + addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi20); else if (efi.acpi) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) efi.acpi; + addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi); else { printk(KERN_ERR PREFIX "System description tables not found\n"); addr->pointer.physical = 0; @@ -174,6 +174,13 @@ acpi_status acpi_os_map_memory(ACPI_PHYSICAL_ADDRESS phys, ACPI_SIZE size, void **virt) { +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_UC & efi_mem_attributes(phys)) { + *virt = ioremap(phys, size); + } else { + *virt = phys_to_virt(phys); + } +#else if (phys > ULONG_MAX) { printk(KERN_ERR PREFIX "Cannot map memory that high\n"); return AE_BAD_PARAMETER; @@ -183,6 +190,7 @@ * ioremap already checks to ensure this is in reserved space */ *virt = ioremap((unsigned long) phys, size); +#endif if (!*virt) return AE_NO_MEMORY; @@ -324,26 +332,41 @@ void *value, u32 width) { - u32 dummy; - + u32 dummy; + int iomem = 0; + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_UC & efi_mem_attributes(phys_addr)) { + iomem = 1; + virt_addr = ioremap(phys_addr, width); + } else { + virt_addr = phys_to_virt(phys_addr); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif if (!value) value = &dummy; switch (width) { case 8: - *(u8*) value = *(u8*) phys_to_virt(phys_addr); + 
*(u8*) value = *(u8*) virt_addr; break; case 16: - *(u16*) value = *(u16*) phys_to_virt(phys_addr); + *(u16*) value = *(u16*) virt_addr; break; case 32: - *(u32*) value = *(u32*) phys_to_virt(phys_addr); + *(u32*) value = *(u32*) virt_addr; break; default: BUG(); } + if (iomem) + iounmap(virt_addr); + return AE_OK; } @@ -353,20 +376,37 @@ acpi_integer value, u32 width) { + int iomem = 0; + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_UC & efi_mem_attributes(phys_addr)) { + iomem = 1; + virt_addr = ioremap(phys_addr,width); + } else { + virt_addr = phys_to_virt(phys_addr); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif + switch (width) { case 8: - *(u8*) phys_to_virt(phys_addr) = value; + *(u8*) virt_addr = value; break; case 16: - *(u16*) phys_to_virt(phys_addr) = value; + *(u16*) virt_addr = value; break; case 32: - *(u32*) phys_to_virt(phys_addr) = value; + *(u32*) virt_addr = value; break; default: BUG(); } + + if (iomem) + iounmap(virt_addr); return AE_OK; } diff -Nru a/drivers/acpi/acpi_pci_root.c b/drivers/acpi/acpi_pci_root.c --- a/drivers/acpi/acpi_pci_root.c Fri Apr 26 00:01:27 2002 +++ b/drivers/acpi/acpi_pci_root.c Fri Apr 26 00:01:27 2002 @@ -1,5 +1,5 @@ /* - * acpi_pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 30 $) + * acpi_pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 31 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh @@ -21,6 +21,9 @@ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Copyright (C) 2002 J.I. Lee + * 02/04/18 J.I. Lee Allowed defered prt parsing on PCI root bridges + * that have no immediate prts. 
*/ #include @@ -48,7 +51,7 @@ static int acpi_pci_root_add (struct acpi_device *device); static int acpi_pci_root_remove (struct acpi_device *device, int type); -static int acpi_pci_root_bind (struct acpi_device *device); +static int acpi_pci_bind (struct acpi_device *device); static struct acpi_driver acpi_pci_root_driver = { name: ACPI_PCI_ROOT_DRIVER_NAME, @@ -57,12 +60,13 @@ ops: { add: acpi_pci_root_add, remove: acpi_pci_root_remove, - bind: acpi_pci_root_bind, + bind: acpi_pci_bind, }, }; struct acpi_pci_data { acpi_pci_id id; + struct pci_bus *bus; struct pci_dev *dev; }; @@ -296,7 +300,7 @@ buffer.pointer = pathname; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO PREFIX "%s [%s._PRT]\n", ACPI_PCI_PRT_DEVICE_NAME, + printk(KERN_INFO PREFIX "%s in [%s]\n", ACPI_PCI_PRT_DEVICE_NAME, pathname); /* @@ -307,8 +311,7 @@ buffer.pointer = NULL; status = acpi_get_irq_routing_table(handle, &buffer); if (status != AE_BUFFER_OVERFLOW) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error evaluating _PRT [%s]\n", + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", acpi_format_exception(status))); return_VALUE(-ENODEV); } @@ -321,8 +324,7 @@ status = acpi_get_irq_routing_table(handle, &buffer); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error evaluating _PRT [%s]\n", + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", acpi_format_exception(status))); kfree(buffer.pointer); return_VALUE(-ENODEV); @@ -407,16 +409,16 @@ static int -acpi_pci_root_bind ( +acpi_pci_bind ( struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; struct acpi_pci_data *data = NULL; - struct acpi_pci_data *parent_data = NULL; + struct acpi_pci_data *pdata = NULL; acpi_handle handle = NULL; - ACPI_FUNCTION_TRACE("acpi_pci_root_bind"); + ACPI_FUNCTION_TRACE("acpi_pci_bind"); if (!device || !device->parent) return_VALUE(-EINVAL); @@ -432,12 +434,11 @@ /* * Segment & Bus * ------------- - * These are obtained via the 
parent device's ACPI-PCI context.. - * Note that PCI root bridge devices don't have a 'dev->subordinate'. + * These are obtained via the parent device's ACPI-PCI context. */ status = acpi_get_data(device->parent->handle, acpi_pci_data_handler, - (void**) &parent_data); - if (ACPI_FAILURE(status) || !parent_data || !parent_data->dev) { + (void**) &pdata); + if (ACPI_FAILURE(status) || !pdata || !pdata->bus) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid ACPI-PCI context for parent device %s\n", acpi_device_bid(device->parent))); @@ -445,19 +446,9 @@ goto end; } - data->id.segment = parent_data->id.segment; + data->id.segment = pdata->id.segment; - if (parent_data->dev->subordinate) /* e.g. PCI-PCI bridge */ - data->id.bus = parent_data->dev->subordinate->number; - else if (parent_data->dev->bus) /* PCI root bridge */ - data->id.bus = parent_data->dev->bus->number; - else { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Parent device %s is not a PCI bridge\n", - acpi_device_bid(device->parent))); - result = -ENODEV; - goto end; - } + data->id.bus = pdata->bus->number; /* * Device & Function @@ -474,6 +465,10 @@ data->id.segment, data->id.bus, data->id.device, data->id.function)); + /* + * TBD: Support slot devices (e.g. function=0xFFFF). + */ + /* * Locate PCI Device * ----------------- @@ -501,6 +496,21 @@ } /* + * PCI Bridge? + * ----------- + * If so, set the 'bus' field and install the 'bind' function to + * facilitate callbacks for all of its children. + */ + if (data->dev->subordinate) { + data->bus = data->dev->subordinate; + device->ops.bind = acpi_pci_bind; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device %02x:%02x:%02x.%02x is a PCI bridge\n", + data->id.segment, data->id.bus, + data->id.device, data->id.function)); + } + + /* * Attach ACPI-PCI Context * ----------------------- * Thus binding the ACPI and PCI devices. @@ -515,15 +525,6 @@ } /* - * PCI Bridge? - * ----------- - * If so, install the 'bind' function to facilitate callbacks for - * all of its children. 
- */ - if (data->dev->subordinate) - device->ops.bind = acpi_pci_root_bind; - - /* * PCI Routing Table * ----------------- * Evaluate and parse _PRT, if exists. This code is independent of @@ -535,9 +536,9 @@ */ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); if (ACPI_SUCCESS(status)) { - if (data->dev->subordinate) /* PCI-PCI bridge */ + if (data->bus) /* PCI-PCI bridge */ acpi_prt_parse(device->handle, data->id.segment, - data->dev->subordinate->number); + data->bus->number); else /* non-bridge PCI device */ acpi_prt_parse(device->handle, data->id.segment, data->id.bus); @@ -563,6 +564,7 @@ struct acpi_pci_root *root = NULL; acpi_status status = AE_OK; unsigned long value = 0; + acpi_handle handle = NULL; ACPI_FUNCTION_TRACE("acpi_pci_root_add"); @@ -582,7 +584,7 @@ /* * TBD: Doesn't the bus driver automatically set this? */ - device->ops.bind = acpi_pci_root_bind; + device->ops.bind = acpi_pci_bind; /* * Segment @@ -596,7 +598,8 @@ root->data.id.segment = (u16) value; break; case AE_NOT_FOUND: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming segment 0 (no _SEG)\n")); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Assuming segment 0 (no _SEG)\n")); root->data.id.segment = 0; break; default: @@ -639,10 +642,9 @@ * TBD: Need PCI interface for enumeration/configuration of roots. */ - printk(KERN_INFO PREFIX "%s [%s] (%02x:%02x:%02x.%02x)\n", + printk(KERN_INFO PREFIX "%s [%s] (%02x:%02x)\n", acpi_device_name(device), acpi_device_bid(device), - root->data.id.segment, root->data.id.bus, - root->data.id.device, root->data.id.function); + root->data.id.segment, root->data.id.bus); /* * Scan the Root Bridge @@ -651,20 +653,11 @@ * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ - pci_scan_bus(root->data.id.bus, pci_root_ops, NULL); - - /* - * Locate PCI Device - * ----------------- - * Locate the matching PCI root bridge device in the PCI namespace. 
- */ - root->data.dev = pci_find_slot(root->data.id.bus, - PCI_DEVFN(root->data.id.device, root->data.id.function)); - if (!root->data.dev) { + root->data.bus = pcibios_scan_root(root->data.id.segment, root->data.id.bus); + if (!root->data.bus) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Device %02x:%02x:%02x.%02x not present\n", - root->data.id.segment, root->data.id.bus, - root->data.id.device, root->data.id.function)); + "Bus %02x:%02x not present in PCI namespace\n", + root->data.id.segment, root->data.id.bus)); result = -ENODEV; goto end; } @@ -672,7 +665,8 @@ /* * Attach ACPI-PCI Context * ----------------------- - * Thus binding the ACPI and PCI devices. + * Thus binding the ACPI and PCI devices. Note that PCI root bridges + * never set a 'data.dev' member (rely on 'data.bus' instead). */ status = acpi_attach_data(root->handle, acpi_pci_data_handler, &root->data); @@ -687,9 +681,27 @@ /* * PCI Routing Table * ----------------- - * Evaluate and parse _PRT, if exists. Note that root bridges - * must have a _PRT (optional for subordinate bridges). + * Evaluate and parse _PRT, if exists. Note that root bridges MUST + * have a _PRT (optional for PCI-PCI bridges). + * + */ + + /* + * J.I. + * Some PCI Root Brides can have no immediate _PRTs, + * in that case, _PRTs are buried in child devices. + * So, let's pass even if Root PCI bridge has no immediate _PRT + * and defer the _PRT parsing until we get somewhere down there. 
*/ + status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Deferred _PRT parsing for PCI Root bridge(s:b=0x%x:%x)...\n", + root->data.id.segment, root->data.id.bus)); + //result = 0; + goto end; + } + result = acpi_prt_parse(device->handle, root->data.id.segment, root->data.id.bus); diff -Nru a/drivers/acpi/acpi_system.c b/drivers/acpi/acpi_system.c --- a/drivers/acpi/acpi_system.c Fri Apr 26 00:01:27 2002 +++ b/drivers/acpi/acpi_system.c Fri Apr 26 00:01:27 2002 @@ -177,7 +177,7 @@ #endif /* flush caches */ - wbinvd(); + ACPI_FLUSH_CPU_CACHE(); /* Do arch specific saving of state. */ if (state > ACPI_STATE_S1) { @@ -305,7 +305,7 @@ /* disable interrupts and flush caches */ ACPI_DISABLE_IRQS(); - wbinvd(); + ACPI_FLUSH_CPU_CACHE(); /* perform OS-specific sleep actions */ status = acpi_system_suspend(state); diff -Nru a/drivers/acpi/include/platform/aclinux.h b/drivers/acpi/include/platform/aclinux.h --- a/drivers/acpi/include/platform/aclinux.h Fri Apr 26 00:01:27 2002 +++ b/drivers/acpi/include/platform/aclinux.h Fri Apr 26 00:01:27 2002 @@ -42,7 +42,7 @@ #define strtoul simple_strtoul -#ifdef _IA64 +#ifdef CONFIG_IA64 #define ACPI_FLUSH_CPU_CACHE() #else #define ACPI_FLUSH_CPU_CACHE() wbinvd() diff -Nru a/drivers/char/Config.help b/drivers/char/Config.help --- a/drivers/char/Config.help Fri Apr 26 00:01:27 2002 +++ b/drivers/char/Config.help Fri Apr 26 00:01:27 2002 @@ -127,6 +127,10 @@ 815 and 830m chipset boards for their on-board integrated graphics. This is required to do any useful video modes with these boards. +CONFIG_AGP_I460 + This option gives you AGP GART support for the Intel 460GX chipset + for IA64 processors. + CONFIG_AGP_VIA This option gives you AGP support for the GLX component of the XFree86 4.x on VIA MPV3/Apollo Pro chipsets. @@ -169,6 +173,10 @@ You should say Y here if you use XFree86 3.3.6 or 4.x and want to use GLX or DRI. If unsure, say N. 
+ +CONFIG_AGP_HP_ZX1 + This option gives you AGP GART support for the HP ZX1 chipset + for IA64 processors. CONFIG_I810_TCO Hardware driver for the TCO timer built into the Intel i810 and i815 diff -Nru a/drivers/char/Config.in b/drivers/char/Config.in --- a/drivers/char/Config.in Fri Apr 26 00:01:26 2002 +++ b/drivers/char/Config.in Fri Apr 26 00:01:26 2002 @@ -209,6 +209,10 @@ dep_tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP if [ "$CONFIG_AGP" != "n" ]; then bool ' Intel 440LX/BX/GX and I815/I820/I830M/I840/I845/I850/I860 support' CONFIG_AGP_INTEL + if [ "$CONFIG_IA64" = "y" ]; then + bool ' Intel 460GX support' CONFIG_AGP_I460 + bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 + fi bool ' Intel I810/I815/I830M (on-board) support' CONFIG_AGP_I810 bool ' VIA chipset support' CONFIG_AGP_VIA bool ' AMD Irongate, 761, and 762 support' CONFIG_AGP_AMD diff -Nru a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h --- a/drivers/char/agp/agp.h Fri Apr 26 00:01:27 2002 +++ b/drivers/char/agp/agp.h Fri Apr 26 00:01:27 2002 @@ -84,8 +84,8 @@ void *dev_private_data; struct pci_dev *dev; gatt_mask *masks; - unsigned long *gatt_table; - unsigned long *gatt_table_real; + u32 *gatt_table; + u32 *gatt_table_real; unsigned long scratch_page; unsigned long gart_bus_addr; unsigned long gatt_bus_addr; @@ -99,7 +99,6 @@ int needs_scratch_page; int aperture_size_idx; int num_aperture_sizes; - int num_of_masks; int capndx; int cant_use_aperture; @@ -111,6 +110,7 @@ void (*cleanup) (void); void (*tlb_flush) (agp_memory *); unsigned long (*mask_memory) (unsigned long, int); + unsigned long (*unmask_memory) (unsigned long); void (*cache_flush) (void); int (*create_gatt_table) (void); int (*free_gatt_table) (void); @@ -125,10 +125,12 @@ }; +#define OUTREG64(mmap, addr, val) __raw_writeq((val), (mmap)+(addr)) #define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr)) #define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr)) #define OUTREG8(mmap, addr, val) 
__raw_writeb((val), (mmap)+(addr)) +#define INREG64(mmap, addr) __raw_readq((mmap)+(addr)) #define INREG32(mmap, addr) __raw_readl((mmap)+(addr)) #define INREG16(mmap, addr) __raw_readw((mmap)+(addr)) #define INREG8(mmap, addr) __raw_readb((mmap)+(addr)) @@ -221,6 +223,9 @@ #ifndef PCI_DEVICE_ID_INTEL_82443GX_1 #define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 #endif +#ifndef PCI_DEVICE_ID_INTEL_460GX +#define PCI_DEVICE_ID_INTEL_460GX 0x84ea +#endif #ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 #define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 #endif @@ -263,6 +268,15 @@ #define INTEL_NBXCFG 0x50 #define INTEL_ERRSTS 0x91 +/* Intel 460GX Registers */ +#define INTEL_I460_APBASE 0x10 +#define INTEL_I460_BAPBASE 0x98 +#define INTEL_I460_GXBCTL 0xa0 +#define INTEL_I460_AGPSIZ 0xa2 +#define INTEL_I460_ATTBASE 0xfe200000 +#define INTEL_I460_GATT_VALID (1UL << 24) +#define INTEL_I460_GATT_COHERENT (1UL << 25) + /* intel i830 registers */ #define I830_GMCH_CTRL 0x52 #define I830_GMCH_ENABLED 0x4 @@ -374,5 +388,14 @@ #define SVWRKS_TLBFLUSH 0x10 #define SVWRKS_POSTFLUSH 0x14 #define SVWRKS_DIRFLUSH 0x0c + +/* HP ZX1 SBA registers */ +#define HP_ZX1_CTRL 0x200 +#define HP_ZX1_IBASE 0x300 +#define HP_ZX1_IMASK 0x308 +#define HP_ZX1_PCOM 0x310 +#define HP_ZX1_TCNFG 0x318 +#define HP_ZX1_PDIR_BASE 0x320 +#define HP_ZX1_CACHE_FLUSH 0x428 #endif /* _AGP_BACKEND_PRIV_H */ diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Fri Apr 26 00:01:26 2002 +++ b/drivers/char/agp/agpgart_be.c Fri Apr 26 00:01:26 2002 @@ -17,11 +17,12 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * + * 460GX support by Chris Ahna */ #include #include @@ -43,6 +44,9 @@ #include #include #include +#include +#include +#include #include #include "agp.h" @@ -59,33 +63,44 @@ EXPORT_SYMBOL(agp_backend_acquire); EXPORT_SYMBOL(agp_backend_release); -static void flush_cache(void); +static void flush_cache (void); + +/* Declare these with attribute unused so the compiler doesn't complain if the + routines are not used. It would be even better if they weren't compiled + into the kernel at all. */ +static void agp_generic_agp_enable (u32) __attribute__((unused)); +static int agp_generic_create_gatt_table (void) __attribute__((unused)); +static int agp_generic_suspend (void) __attribute__((unused)); +static void agp_generic_resume (void) __attribute__((unused)); +static int agp_generic_free_gatt_table (void) __attribute__((unused)); +static int agp_generic_insert_memory (agp_memory *, off_t, int) __attribute__((unused)); +static int agp_generic_remove_memory (agp_memory *, off_t, int) __attribute__((unused)); +static agp_memory *agp_generic_alloc_by_type (size_t, int) __attribute__((unused)); +static void agp_generic_free_by_type (agp_memory *) __attribute__((unused)); +static unsigned long agp_generic_alloc_page (void) __attribute__((unused)); +static void agp_generic_destroy_page (unsigned long) __attribute__((unused)); +static unsigned long agp_generic_unmask_memory (unsigned long) __attribute__((unused)); + static struct agp_bridge_data agp_bridge; static int agp_try_unsupported 
__initdata = 0; +#if defined(__alpha__) || defined(__ia64__) || defined(__sparc__) -static inline void flush_cache(void) +static inline void flush_cache (void) { -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__) - /* ??? I wonder if we'll really need to flush caches, or if the - core logic can manage to keep the system coherent. The ARM - speaks only of using `cflush' to get things in memory in - preparation for power failure. + mb(); +} - If we do need to call `cflush', we'll need a target page, - as we can only flush one page at a time. +#define smp_flush_cache flush_cache - Ditto for IA-64. --davidm 00/08/07 */ - mb(); -#else -#error "Please define flush_cache." -#endif +#elif defined(__i386__) || defined(__x86_64__) + +static inline void flush_cache(void) +{ + asm volatile ("wbinvd":::"memory"); } -#ifdef CONFIG_SMP static atomic_t cpus_waiting; static void ipi_handler(void *null) @@ -105,10 +120,15 @@ while (atomic_read(&cpus_waiting) > 0) barrier(); } -#define global_cache_flush smp_flush_cache -#else /* CONFIG_SMP */ -#define global_cache_flush flush_cache -#endif /* CONFIG_SMP */ +#else +# error "Please define flush_cache." +#endif + +#ifdef CONFIG_SMP +# define global_cache_flush smp_flush_cache +#else +# define global_cache_flush flush_cache +#endif int agp_backend_acquire(void) { @@ -134,13 +154,12 @@ MOD_DEC_USE_COUNT; } -/* +/* * Generic routines for handling agp_memory structures - * They use the basic page allocation routines to do the * brunt of the work. 
*/ - static void agp_free_key(int key) { @@ -205,13 +224,17 @@ agp_bridge.free_by_type(curr); return; } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - curr->memory[i] &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(curr->memory[i])); + if (!agp_bridge.cant_use_aperture) { + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + agp_bridge.agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); + } } + } else { + vfree(curr->vmptr); } + agp_free_key(curr->key); vfree(curr->memory); kfree(curr); @@ -247,26 +270,48 @@ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_memory(scratch_pages); - if (new == NULL) { MOD_DEC_USE_COUNT; return NULL; } - for (i = 0; i < page_count; i++) { - new->memory[i] = agp_bridge.agp_alloc_page(); - if (new->memory[i] == 0) { - /* Free this structure */ - agp_free_memory(new); + if (!agp_bridge.cant_use_aperture) { + for (i = 0; i < page_count; i++) { + new->memory[i] = agp_bridge.agp_alloc_page(); + + if (new->memory[i] == 0) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = virt_to_phys((void *) new->memory[i]); + new->page_count++; + } + } else { + void *vmblock, *vaddr; + unsigned long paddr; + struct page *page; + + vmblock = __vmalloc(page_count << PAGE_SHIFT, GFP_KERNEL, PAGE_KERNEL); + if (vmblock == NULL) { + MOD_DEC_USE_COUNT; return NULL; } - new->memory[i] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[i]), - type); - new->page_count++; - } + new->vmptr = vmblock; + vaddr = vmblock; + + for (i = 0; i < page_count; i++, vaddr += PAGE_SIZE) { + page = vmalloc_to_page(vaddr); + if (!page) { + MOD_DEC_USE_COUNT; + return NULL; + } + paddr = virt_to_phys(page_address(page)); + new->memory[i] = paddr; + } + new->page_count = page_count; + } return new; } @@ -307,9 +352,6 @@ void agp_copy_info(agp_kern_info * info) { - unsigned long page_mask = 0; - 
int i; - memset(info, 0, sizeof(agp_kern_info)); if (agp_bridge.type == NOT_SUPPORTED) { info->chipset = agp_bridge.type; @@ -325,11 +367,7 @@ info->max_memory = agp_bridge.max_memory_agp; info->current_memory = atomic_read(&agp_bridge.current_memory_agp); info->cant_use_aperture = agp_bridge.cant_use_aperture; - - for(i = 0; i < agp_bridge.num_of_masks; i++) - page_mask |= agp_bridge.mask_memory(page_mask, i); - - info->page_mask = ~page_mask; + info->page_mask = ~0UL; } /* End - Routine to copy over information structure */ @@ -359,6 +397,7 @@ } curr->is_bound = TRUE; curr->pg_start = pg_start; + return 0; } @@ -377,6 +416,7 @@ if (ret_val != 0) { return ret_val; } + curr->is_bound = FALSE; curr->pg_start = 0; return 0; @@ -384,12 +424,12 @@ /* End - Routines for handling swapping of agp_memory into the GATT */ -/* +/* * Driver routines - start * Currently this module supports the following chipsets: - * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3, + * i810, i815, 440lx, 440bx, 440gx, 460gx, i830, i840, i845, i850, i860, via vp3, * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541, - * and generic support for the SiS chipsets. + * chipsets. */ /* Generic Agp routines - Start */ @@ -397,7 +437,7 @@ static void agp_generic_agp_enable(u32 mode) { struct pci_dev *device = NULL; - u32 command, scratch; + u32 command, scratch; u8 cap_ptr; pci_read_config_dword(agp_bridge.dev, @@ -414,7 +454,7 @@ cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); if (cap_ptr != 0x00) { /* - * Ok, here we have a AGP device. Disable impossible + * Ok, here we have a AGP device. Disable impossible * settings, and adjust the readqueue to the minimum. */ @@ -551,8 +591,8 @@ case U32_APER_SIZE: agp_bridge.current_size = A_IDX32(); break; - /* This case will never really - * happen. + /* This case will never really + * happen. 
*/ case FIXED_APER_SIZE: case LVL2_APER_SIZE: @@ -581,7 +621,7 @@ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (unsigned long *) table; + agp_bridge.gatt_table_real = (u32 *) table; CACHE_FLUSH(); agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); @@ -713,7 +753,8 @@ mem->is_flushed = TRUE; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge.gatt_table[j] = mem->memory[i]; + agp_bridge.gatt_table[j] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); @@ -752,10 +793,10 @@ kfree(curr); } -/* +/* * Basic Page Allocation Routines - * These routines handle page allocation - * and by default they reserve the allocated + * and by default they reserve the allocated * memory. They also handle incrementing the * current_memory_agp value, Which is checked * against a maximum value. @@ -764,7 +805,7 @@ static unsigned long agp_generic_alloc_page(void) { struct page * page; - + page = alloc_page(GFP_KERNEL); if (page == NULL) return 0; @@ -798,6 +839,11 @@ agp_bridge.agp_enable(mode); } +static unsigned long agp_generic_unmask_memory(unsigned long addr) +{ + return addr & ~0x00000fffUL; +} + /* End - Generic Agp routines */ #ifdef CONFIG_AGP_I810 @@ -937,7 +983,7 @@ agp_bridge.tlb_flush(mem); return 0; } - if((type == AGP_PHYS_MEMORY) && + if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY)) { goto insert; } @@ -948,7 +994,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (j * 4), mem->memory[i]); + I810_PTE_BASE + (j * 4), + agp_bridge.mask_memory(mem->memory[i], mem->type)); } CACHE_FLUSH(); @@ -992,7 +1039,7 @@ MOD_INC_USE_COUNT; return new; } - if(type == AGP_PHYS_MEMORY) { + if (type == AGP_PHYS_MEMORY) { /* The I810 requires a physical address to program * it's mouse pointer into hardware. 
However the * Xserver still writes to it through the agp @@ -1014,24 +1061,21 @@ agp_free_memory(new); return NULL; } - new->memory[0] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[0]), - type); + new->memory[0] = virt_to_phys((void *) new->memory[0]); new->page_count = 1; new->num_scratch_pages = 1; new->type = AGP_PHYS_MEMORY; new->physical = virt_to_phys((void *) new->memory[0]); return new; } - + return NULL; } static void intel_i810_free_by_type(agp_memory * curr) { agp_free_key(curr->key); - if(curr->type == AGP_PHYS_MEMORY) { + if (curr->type == AGP_PHYS_MEMORY) { agp_bridge.agp_destroy_page((unsigned long) phys_to_virt(curr->memory[0])); vfree(curr->memory); @@ -1051,7 +1095,6 @@ intel_i810_private.i810_dev = i810_dev; agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 2; agp_bridge.aperture_sizes = (void *) intel_i810_sizes; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1062,6 +1105,7 @@ agp_bridge.cleanup = intel_i810_cleanup; agp_bridge.tlb_flush = intel_i810_tlbflush; agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = intel_i810_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1253,7 +1297,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]); + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4), + agp_bridge.mask_memory(mem->memory[i], mem->type)); CACHE_FLUSH(); @@ -1314,7 +1359,7 @@ return(NULL); } - nw->memory[0] = agp_bridge.mask_memory(virt_to_phys((void *) nw->memory[0]),type); + nw->memory[0] = virt_to_phys((void *) nw->memory[0]); nw->page_count = 1; nw->num_scratch_pages = 1; nw->type = AGP_PHYS_MEMORY; @@ -1330,7 +1375,6 @@ intel_i830_private.i830_dev = i830_dev; agp_bridge.masks = intel_i810_masks; - 
agp_bridge.num_of_masks = 3; agp_bridge.aperture_sizes = (void *) intel_i830_sizes; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1365,6 +1409,575 @@ #endif /* CONFIG_AGP_I810 */ +#ifdef CONFIG_AGP_I460 + +/* BIOS configures the chipset so that one of two apbase registers are used */ +static u8 intel_i460_dynamic_apbase = 0x10; + +/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ +static u8 intel_i460_pageshift = 12; +static u32 intel_i460_pagesize; + +/* Keep track of which is larger, chipset or kernel page size. */ +static u32 intel_i460_cpk = 1; + +/* Structure for tracking partial use of 4MB GART pages */ +static u32 **i460_pg_detail = NULL; +static u32 *i460_pg_count = NULL; + +#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) +#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) + +#define I460_SRAM_IO_DISABLE (1 << 4) +#define I460_BAPBASE_ENABLE (1 << 3) +#define I460_AGPSIZ_MASK 0x7 +#define I460_4M_PS (1 << 1) + +#define log2(x) ffz(~(x)) + +static inline void intel_i460_read_back (volatile u32 *entry) +{ + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + *entry; +} + +static int intel_i460_fetch_size(void) +{ + int i; + u8 temp; + aper_size_info_8 *values; + + /* Determine the GART page size */ + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); + intel_i460_pageshift = (temp & I460_4M_PS) ? 
22 : 12; + intel_i460_pagesize = 1UL << intel_i460_pageshift; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + + /* Exit now if the IO drivers for the GART SRAMS are turned off */ + if (temp & I460_SRAM_IO_DISABLE) { + printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); + printk(KERN_ERR PFX "AGPGART operation not possible\n"); + return 0; + } + + /* Make sure we don't try to create an 2 ^ 23 entry GATT */ + if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); + return 0; + } + + /* Determine the proper APBASE register */ + if (temp & I460_BAPBASE_ENABLE) + intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; + else + intel_i460_dynamic_apbase = INTEL_I460_APBASE; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* + * Dynamically calculate the proper num_entries and page_order values for + * the define aperture sizes. Take care not to shift off the end of + * values[i].size. + */ + values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12); + values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. 
*/ +static void intel_i460_tlb_flush(agp_memory * mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void intel_i460_write_agpsiz(u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void intel_i460_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + intel_i460_write_agpsiz(previous_size->size_value); + + if (intel_i460_cpk == 0) { + vfree(i460_pg_detail); + vfree(i460_pg_count); + } +} + + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +static int intel_i460_configure(void) +{ + union { + u32 small[2]; + u64 large; + } temp; + u8 scratch; + int i; + + aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge.current_size); + intel_i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems. + */ + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than + * a kernel page. 
+ */ + if (I460_CPAGES_PER_KPAGE >= 1) { + intel_i460_cpk = 1; + } else { + intel_i460_cpk = 0; + + i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries); + i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries); + + for (i = 0; i < current_size->num_entries; i++) { + i460_pg_count[i] = 0; + i460_pg_detail[i] = NULL; + } + } + return 0; +} + +static int intel_i460_create_gatt_table(void) +{ + char *table; + int i; + int page_order; + int num_entries; + void *temp; + + /* + * Load up the fixed address of the GART SRAMS which hold our + * GATT table. + */ + table = (char *) __va(INTEL_I460_ATTBASE); + + temp = agp_bridge.current_size; + page_order = A_SIZE_8(temp)->page_order; + num_entries = A_SIZE_8(temp)->num_entries; + + agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +static int intel_i460_free_gatt_table(void) +{ + int num_entries; + int i; + void *temp; + + temp = agp_bridge.current_size; + + num_entries = A_SIZE_8(temp)->num_entries; + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + + iounmap(agp_bridge.gatt_table); + return 0; +} + +/* These functions are called when PAGE_SIZE exceeds the GART page size */ + +static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i, j, k, num_entries; + void *temp; + unsigned long paddr; + + /* + * The rest of the kernel will compute page offsets in terms of + * PAGE_SIZE. 
+ */ + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... */ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (i = 0, j = pg_start; i < mem->page_count; i++) { + paddr = mem->memory[i]; + for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) + agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type); + } + + intel_i460_read_back(agp_bridge.gatt_table + j - 1); + return 0; +} + +static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i; + + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++) + agp_bridge.gatt_table[i] = 0; + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +/* + * These functions are called when the GART page size exceeds PAGE_SIZE. + * + * This situation is interesting since AGP memory allocations that are + * smaller than a single GART page are possible. The structures i460_pg_count + * and i460_pg_detail track partial allocation of the large GART pages to + * work around this issue. + * + * i460_pg_count[pg_num] tracks the number of kernel pages in use within + * GART page pg_num. i460_pg_detail[pg_num] is an array containing a + * psuedo-GART entry for each of the aforementioned kernel pages. The whole + * of i460_pg_detail is equivalent to a giant GATT with page size equal to + * that of the kernel. 
+ */ + +static void *intel_i460_alloc_large_page(int pg_num) +{ + int i; + void *bp, *bp_end; + struct page *page; + + i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE); + if (i460_pg_detail[pg_num] == NULL) { + printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); + return NULL; + } + + for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) + i460_pg_detail[pg_num][i] = 0; + + bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT); + if (bp == NULL) { + printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); + return NULL; + } + + bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); + + for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { + atomic_inc(&page->count); + set_bit(PG_locked, &page->flags); + atomic_inc(&agp_bridge.current_memory_agp); + } + return bp; +} + +static void intel_i460_free_large_page(int pg_num, unsigned long addr) +{ + struct page *page; + void *bp, *bp_end; + + bp = (void *) __va(addr); + bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))); + + vfree(i460_pg_detail[pg_num]); + i460_pg_detail[pg_num] = NULL; + + for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { + atomic_dec(&page->count); + clear_bit(PG_locked, &page->flags); + wake_up_page(page); + atomic_dec(&agp_bridge.current_memory_agp); + } + + free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); +} + +static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start 
+ mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + if (end_pg > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (pg = start_pg; pg <= end_pg; pg++) { + /* Allocate new GART pages if necessary */ + if (i460_pg_detail[pg] == NULL) { + temp = intel_i460_alloc_large_page(pg); + if (temp == NULL) + return -ENOMEM; + agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp, + 0); + intel_i460_read_back(agp_bridge.gatt_table + pg); + } + + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++) + { + if (i460_pg_detail[pg][idx] != 0) + return -EBUSY; + } + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... */ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (pg = start_pg, i = 0; pg <= end_pg; pg++) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? 
(end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = paddr + (idx * PAGE_SIZE); + i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i], + mem->type); + i460_pg_count[pg]++; + } + } + + return 0; +} + +static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + for (i = 0, pg = start_pg; pg <= end_pg; pg++) { + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = 0; + i460_pg_detail[pg][idx] = 0; + i460_pg_count[pg]--; + } + + /* Free GART pages if they are unused */ + if (i460_pg_count[pg] == 0) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; + intel_i460_read_back(agp_bridge.gatt_table + pg); + intel_i460_free_large_page(pg, paddr); + } + } + return 0; +} + +/* Dummy routines to call the approriate {cpk,kpc} function */ + +static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_insert_memory_cpk(mem, pg_start, type); + else + return intel_i460_insert_memory_kpc(mem, pg_start, type); +} + +static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_remove_memory_cpk(mem, pg_start, type); + else + return intel_i460_remove_memory_kpc(mem, pg_start, type); +} + +/* + * If the kernel page size 
is smaller that the chipset page size, we don't + * want to allocate memory until we know where it is to be bound in the + * aperture (a multi-kernel-page alloc might fit inside of an already + * allocated GART page). Consequently, don't allocate or free anything + * if i460_cpk (meaning chipset pages per kernel page) isn't set. + * + * Let's just hope nobody counts on the allocated AGP memory being there + * before bind time (I don't think current drivers do)... + */ +static unsigned long intel_i460_alloc_page(void) +{ + if (intel_i460_cpk) + return agp_generic_alloc_page(); + + /* Returning NULL would cause problems */ + return ~0UL; +} + +static void intel_i460_destroy_page(unsigned long page) +{ + if (intel_i460_cpk) + agp_generic_destroy_page(page); +} + +static gatt_mask intel_i460_masks[] = +{ + { + INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, + 0 + } +}; + +static unsigned long intel_i460_mask_memory(unsigned long addr, int type) +{ + /* Make sure the returned address is a valid GATT entry */ + return (agp_bridge.masks[0].mask + | (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); +} + +static unsigned long intel_i460_unmask_memory(unsigned long addr) +{ + /* Turn a GATT entry into a physical address */ + return ((addr & 0xffffff) << 12); +} + +static aper_size_info_8 intel_i460_sizes[3] = +{ + /* + * The 32GB aperture is only available with a 4M GART page size. + * Due to the dynamic GART page size, we can't figure out page_order + * or num_entries until runtime. 
+ */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +static int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) +{ + agp_bridge.masks = intel_i460_masks; + agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 3; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_i460_configure; + agp_bridge.fetch_size = intel_i460_fetch_size; + agp_bridge.cleanup = intel_i460_cleanup; + agp_bridge.tlb_flush = intel_i460_tlb_flush; + agp_bridge.mask_memory = intel_i460_mask_memory; + agp_bridge.unmask_memory = intel_i460_unmask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = intel_i460_create_gatt_table; + agp_bridge.free_gatt_table = intel_i460_free_gatt_table; + agp_bridge.insert_memory = intel_i460_insert_memory; + agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = intel_i460_alloc_page; + agp_bridge.agp_destroy_page = intel_i460_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + return 0; +} + +#endif /* CONFIG_AGP_I460 */ + #ifdef CONFIG_AGP_INTEL static int intel_fetch_size(void) @@ -1497,7 +2110,7 @@ previous_size = A_SIZE_8(agp_bridge.previous_size); pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, + pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, temp & ~(1 << 1)); pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, previous_size->size_value); @@ -1507,14 +2120,14 @@ static int intel_820_configure(void) { u32 temp; - u8 temp2; + u8 temp2; aper_size_info_8 *current_size; current_size = 
A_SIZE_8(agp_bridge.current_size); /* aperture size */ pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); + current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); @@ -1522,19 +2135,19 @@ /* attbase - aperture base */ pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge.gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); /* global enable aperture access */ /* This flag is not accessed through MCHCFG register as in */ /* i850 chipset. */ pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, + pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, temp2 | (1 << 1)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); return 0; } @@ -1548,7 +2161,7 @@ /* aperture size */ pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); + current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); @@ -1556,17 +2169,17 @@ /* attbase - aperture base */ pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge.gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2); pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, temp2 | (1 << 9)); /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); + pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); return 0; } @@ -1580,7 +2193,7 @@ /* aperture 
size */ pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); + current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); @@ -1588,17 +2201,17 @@ /* attbase - aperture base */ pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge.gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); /* agpm */ pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2); pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM, temp2 | (1 << 1)); /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); return 0; } @@ -1612,7 +2225,7 @@ /* aperture size */ pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); + current_size->size_value); /* address to map to */ pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); @@ -1620,17 +2233,17 @@ /* attbase - aperture base */ pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); + agp_bridge.gatt_bus_addr); /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); /* mcgcfg */ pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2); pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, temp2 | (1 << 9)); /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); + pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); return 0; } @@ -1738,7 +2351,7 @@ {4, 1024, 0, 63} }; -static aper_size_info_8 intel_830mp_sizes[4] = +static aper_size_info_8 intel_830mp_sizes[4] = { {256, 65536, 6, 0}, {128, 32768, 5, 32}, @@ -1746,10 +2359,9 @@ {32, 8192, 3, 56} }; -static int __init 
intel_generic_setup (struct pci_dev *pdev) +static int __init intel_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_generic_sizes; agp_bridge.size_type = U16_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1760,6 +2372,7 @@ agp_bridge.cleanup = intel_cleanup; agp_bridge.tlb_flush = intel_tlbflush; agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1775,15 +2388,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } - -static int __init intel_820_setup (struct pci_dev *pdev) +static int __init intel_820_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1806,17 +2415,16 @@ agp_bridge.agp_destroy_page = agp_generic_destroy_page; agp_bridge.suspend = agp_generic_suspend; agp_bridge.resume = agp_generic_resume; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_830mp_setup (struct pci_dev *pdev) +static int __init intel_830mp_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 4; @@ -1839,17 +2447,16 @@ agp_bridge.agp_destroy_page = agp_generic_destroy_page; agp_bridge.suspend = agp_generic_suspend; agp_bridge.resume = agp_generic_resume; + agp_bridge.suspend = 
agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_840_setup (struct pci_dev *pdev) +static int __init intel_840_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1875,14 +2482,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_845_setup (struct pci_dev *pdev) +static int __init intel_845_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1893,6 +2497,7 @@ agp_bridge.cleanup = intel_8xx_cleanup; agp_bridge.tlb_flush = intel_8xx_tlbflush; agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1908,14 +2513,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_850_setup (struct pci_dev *pdev) +static int __init intel_850_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1926,6 +2528,7 @@ agp_bridge.cleanup = intel_8xx_cleanup; agp_bridge.tlb_flush = intel_8xx_tlbflush; agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = 
global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1941,14 +2544,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_860_setup (struct pci_dev *pdev) +static int __init intel_860_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1974,8 +2574,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_INTEL */ @@ -2065,10 +2663,9 @@ {0x00000000, 0} }; -static int __init via_generic_setup (struct pci_dev *pdev) +static int __init via_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = via_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) via_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2079,6 +2676,7 @@ agp_bridge.cleanup = via_cleanup; agp_bridge.tlb_flush = via_tlbflush; agp_bridge.mask_memory = via_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2094,8 +2692,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_VIA */ @@ -2182,7 +2778,6 @@ static int __init sis_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = sis_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) sis_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2193,6 +2788,7 @@ agp_bridge.cleanup = sis_cleanup; agp_bridge.tlb_flush = sis_tlbflush; agp_bridge.mask_memory = sis_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable 
= agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2215,8 +2811,8 @@ #ifdef CONFIG_AGP_AMD typedef struct _amd_page_map { - unsigned long *real; - unsigned long *remapped; + u32 *real; + u32 *remapped; } amd_page_map; static struct _amd_irongate_private { @@ -2229,14 +2825,13 @@ { int i; - page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); + page_map->real = (u32 *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) { return -ENOMEM; } SetPageReserved(virt_to_page(page_map->real)); CACHE_FLUSH(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), PAGE_SIZE); if (page_map->remapped == NULL) { ClearPageReserved(virt_to_page(page_map->real)); free_page((unsigned long) page_map->real); @@ -2245,7 +2840,7 @@ } CACHE_FLUSH(); - for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { + for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { page_map->remapped[i] = agp_bridge.scratch_page; } @@ -2266,7 +2861,7 @@ amd_page_map *entry; tables = amd_irongate_private.gatt_pages; - for(i = 0; i < amd_irongate_private.num_tables; i++) { + for (i = 0; i < amd_irongate_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) { @@ -2285,8 +2880,7 @@ int retval = 0; int i; - tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *), - GFP_KERNEL); + tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *), GFP_KERNEL); if (tables == NULL) { return -ENOMEM; } @@ -2315,11 +2909,9 @@ */ #define GET_PAGE_DIR_OFF(addr) (addr >> 22) -#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ - GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) -#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) -#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ - GET_PAGE_DIR_IDX(addr)]->remapped) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - 
GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#define GET_GATT(addr) (amd_irongate_private.gatt_pages[GET_PAGE_DIR_IDX(addr)]->remapped) static int amd_create_gatt_table(void) { @@ -2356,7 +2948,7 @@ agp_bridge.gart_bus_addr = addr; /* Calculate the agp offset */ - for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { + for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { page_dir.remapped[GET_PAGE_DIR_OFF(addr)] = virt_to_phys(amd_irongate_private.gatt_pages[i]->real); page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001; @@ -2368,7 +2960,7 @@ static int amd_free_gatt_table(void) { amd_page_map page_dir; - + page_dir.real = agp_bridge.gatt_table_real; page_dir.remapped = agp_bridge.gatt_table; @@ -2418,7 +3010,7 @@ /* Write the Sync register */ pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); - + /* Set indexing mode */ pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00); @@ -2482,7 +3074,7 @@ off_t pg_start, int type) { int i, j, num_entries; - unsigned long *cur_gatt; + u32 *cur_gatt; unsigned long addr; num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; @@ -2512,17 +3104,17 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; } -static int amd_remove_memory(agp_memory * mem, off_t pg_start, - int type) +static int amd_remove_memory(agp_memory * mem, off_t pg_start, int type) { int i; - unsigned long *cur_gatt; + u32 *cur_gatt; unsigned long addr; if (type != 0 || mem->type != 0) { @@ -2531,8 +3123,7 @@ for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = - (unsigned 
long) agp_bridge.scratch_page; + cur_gatt[GET_GATT_OFF(addr)] = (unsigned long) agp_bridge.scratch_page; } agp_bridge.tlb_flush(mem); @@ -2555,10 +3146,9 @@ {0x00000001, 0} }; -static int __init amd_irongate_setup (struct pci_dev *pdev) +static int __init amd_irongate_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = amd_irongate_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2569,6 +3159,7 @@ agp_bridge.cleanup = amd_irongate_cleanup; agp_bridge.tlb_flush = amd_irongate_tlbflush; agp_bridge.mask_memory = amd_irongate_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = amd_create_gatt_table; @@ -2584,8 +3175,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_AMD */ @@ -2667,7 +3256,6 @@ * next couple of lines below it. I suspect this was an oversight, * but you might want to check up on this? 
*/ - pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); @@ -2802,10 +3390,9 @@ {4, 1024, 0, 3} }; -static int __init ali_generic_setup (struct pci_dev *pdev) +static int __init ali_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = ali_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) ali_generic_sizes; agp_bridge.size_type = U32_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2816,6 +3403,7 @@ agp_bridge.cleanup = ali_cleanup; agp_bridge.tlb_flush = ali_tlbflush; agp_bridge.mask_memory = ali_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = ali_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2831,8 +3419,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_ALI */ @@ -2864,8 +3450,7 @@ } SetPageReserved(virt_to_page(page_map->real)); CACHE_FLUSH(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), PAGE_SIZE); if (page_map->remapped == NULL) { ClearPageReserved(virt_to_page(page_map->real)); free_page((unsigned long) page_map->real); @@ -2874,7 +3459,7 @@ } CACHE_FLUSH(); - for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { + for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { page_map->remapped[i] = agp_bridge.scratch_page; } @@ -2895,7 +3480,7 @@ serverworks_page_map *entry; tables = serverworks_private.gatt_pages; - for(i = 0; i < serverworks_private.num_tables; i++) { + for (i = 0; i < serverworks_private.num_tables; i++) { entry = tables[i]; if (entry != NULL) { if (entry->real != NULL) { @@ -2914,8 +3499,7 @@ int retval = 0; int i; - tables = kmalloc((nr_tables + 1) * sizeof(serverworks_page_map *), - GFP_KERNEL); + tables = kmalloc((nr_tables + 
1) * sizeof(serverworks_page_map *), GFP_KERNEL); if (tables == NULL) { return -ENOMEM; } @@ -2974,8 +3558,9 @@ return retval; } /* Create a fake scratch directory */ - for(i = 0; i < 1024; i++) { - serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page; + for (i = 0; i < 1024; i++) { + serverworks_private.scratch_dir.remapped[i] + = (unsigned long) agp_bridge.scratch_page; page_dir.remapped[i] = virt_to_phys(serverworks_private.scratch_dir.real); page_dir.remapped[i] |= 0x00000001; @@ -3002,9 +3587,9 @@ &temp); agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - /* Calculate the agp offset */ + /* Calculate the agp offset */ - for(i = 0; i < value->num_entries / 1024; i++) { + for (i = 0; i < value->num_entries / 1024; i++) { page_dir.remapped[i] = virt_to_phys(serverworks_private.gatt_pages[i]->real); page_dir.remapped[i] |= 0x00000001; @@ -3016,7 +3601,7 @@ static int serverworks_free_gatt_table(void) { serverworks_page_map page_dir; - + page_dir.real = agp_bridge.gatt_table_real; page_dir.remapped = agp_bridge.gatt_table; @@ -3081,7 +3666,7 @@ OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a); - OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, + OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, agp_bridge.gatt_bus_addr); cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND); @@ -3152,18 +3737,16 @@ OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01); end = jiffies + 3*HZ; - while(INREG8(serverworks_private.registers, - SVWRKS_POSTFLUSH) == 0x01) { - if((signed)(end - jiffies) <= 0) { + while(INREG8(serverworks_private.registers, SVWRKS_POSTFLUSH) == 0x01) { + if ((signed)(end - jiffies) <= 0) { printk(KERN_ERR "Posted write buffer flush took more" "then 3 seconds\n"); } } OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001); end = jiffies + 3*HZ; - while(INREG32(serverworks_private.registers, - SVWRKS_DIRFLUSH) == 0x00000001) { - if((signed)(end - 
jiffies) <= 0) { + while(INREG32(serverworks_private.registers, SVWRKS_DIRFLUSH) == 0x00000001) { + if ((signed)(end - jiffies) <= 0) { printk(KERN_ERR "TLB flush took more" "then 3 seconds\n"); } @@ -3177,8 +3760,7 @@ return addr | agp_bridge.masks[0].mask; } -static int serverworks_insert_memory(agp_memory * mem, - off_t pg_start, int type) +static int serverworks_insert_memory(agp_memory * mem, off_t pg_start, int type) { int i, j, num_entries; unsigned long *cur_gatt; @@ -3211,14 +3793,14 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; } -static int serverworks_remove_memory(agp_memory * mem, off_t pg_start, - int type) +static int serverworks_remove_memory(agp_memory * mem, off_t pg_start, int type) { int i; unsigned long *cur_gatt; @@ -3234,8 +3816,7 @@ for (i = pg_start; i < (mem->page_count + pg_start); i++) { addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = - (unsigned long) agp_bridge.scratch_page; + cur_gatt[GET_GATT_OFF(addr)] = (unsigned long) agp_bridge.scratch_page; } agp_bridge.tlb_flush(mem); @@ -3288,7 +3869,7 @@ } if (cap_ptr != 0x00) { /* - * Ok, here we have a AGP device. Disable impossible + * Ok, here we have a AGP device. Disable impossible * settings, and adjust the readqueue to the minimum. 
*/ @@ -3368,7 +3949,6 @@ serverworks_private.svrwrks_dev = pdev; agp_bridge.masks = serverworks_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) serverworks_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -3399,11 +3979,11 @@ serverworks_private.gart_addr_ofs = 0x10; - if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(agp_bridge.dev, SVWRKS_APSIZE + 4, &temp2); - if(temp2 != 0) { + if (temp2 != 0) { printk("Detected 64 bit aperture address, but top " "bits are not zero. Disabling agp\n"); return -ENODEV; @@ -3416,11 +3996,11 @@ pci_read_config_dword(agp_bridge.dev, serverworks_private.mm_addr_ofs, &temp); - if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_read_config_dword(agp_bridge.dev, serverworks_private.mm_addr_ofs + 4, &temp2); - if(temp2 != 0) { + if (temp2 != 0) { printk("Detected 64 bit MMIO address, but top " "bits are not zero. 
Disabling agp\n"); return -ENODEV; @@ -3432,6 +4012,362 @@ #endif /* CONFIG_AGP_SWORKS */ +#ifdef CONFIG_AGP_HP_ZX1 + +#ifndef log2 +#define log2(x) ffz(~(x)) +#endif + +#define HP_ZX1_IOVA_BASE GB(1UL) +#define HP_ZX1_IOVA_SIZE GB(1UL) +#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) +#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL + +#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL +#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) + +static aper_size_info_fixed hp_zx1_sizes[] = +{ + {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ +}; + +static gatt_mask hp_zx1_masks[] = +{ + {HP_ZX1_PDIR_VALID_BIT, 0} +}; + +static struct _hp_private { + struct pci_dev *ioc; + volatile u8 *registers; + u64 *io_pdir; // PDIR for entire IOVA + u64 *gatt; // PDIR just for GART (subset of above) + u64 gatt_entries; + u64 iova_base; + u64 gart_base; + u64 gart_size; + u64 io_pdir_size; + int io_pdir_owner; // do we own it, or share it with sba_iommu? + int io_page_size; + int io_tlb_shift; + int io_tlb_ps; // IOC ps config + int io_pages_per_kpage; +} hp_private; + +static int __init hp_zx1_ioc_shared(void) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); + + /* + * IOC already configured by sba_iommu module; just use + * its setup. 
We assume: + * - IOVA space is 1Gb in size + * - first 512Mb is IOMMU, second 512Mb is GART + */ + hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG); + switch (hp->io_tlb_ps) { + case 0: hp->io_tlb_shift = 12; break; + case 1: hp->io_tlb_shift = 13; break; + case 2: hp->io_tlb_shift = 14; break; + case 3: hp->io_tlb_shift = 16; break; + default: + printk(KERN_ERR PFX "Invalid IOTLB page size " + "configuration 0x%x\n", hp->io_tlb_ps); + hp->gatt = 0; + hp->gatt_entries = 0; + return -ENODEV; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; + + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gatt_entries = hp->gart_size / hp->io_page_size; + + hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE)); + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + + if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { + hp->gatt = 0; + hp->gatt_entries = 0; + printk(KERN_ERR PFX "No reserved IO PDIR entry found; " + "GART disabled\n"); + return -ENODEV; + } + + return 0; +} + +static int __init hp_zx1_ioc_owner(u8 ioc_rev) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); + + /* + * Select an IOV page size no larger than system page size. 
+ */ + if (PAGE_SIZE >= KB(64)) { + hp->io_tlb_shift = 16; + hp->io_tlb_ps = 3; + } else if (PAGE_SIZE >= KB(16)) { + hp->io_tlb_shift = 14; + hp->io_tlb_ps = 2; + } else if (PAGE_SIZE >= KB(8)) { + hp->io_tlb_shift = 13; + hp->io_tlb_ps = 1; + } else { + hp->io_tlb_shift = 12; + hp->io_tlb_ps = 0; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = HP_ZX1_IOVA_BASE; + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; + + hp->gatt_entries = hp->gart_size / hp->io_page_size; + hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); + + return 0; +} + +static int __init hp_zx1_ioc_init(void) +{ + struct _hp_private *hp = &hp_private; + struct pci_dev *ioc; + int i; + u8 ioc_rev; + + ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); + if (!ioc) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n"); + return -ENODEV; + } + hp->ioc = ioc; + + pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { + hp->registers = (u8 *) ioremap(pci_resource_start(ioc, + i), + pci_resource_len(ioc, i)); + break; + } + } + if (!hp->registers) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n"); + + return -ENODEV; + } + + /* + * If the IOTLB is currently disabled, we can take it over. + * Otherwise, we have to share with sba_iommu. 
+ */ + hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; + + if (hp->io_pdir_owner) + return hp_zx1_ioc_owner(ioc_rev); + + return hp_zx1_ioc_shared(); +} + +static int hp_zx1_fetch_size(void) +{ + int size; + + size = hp_private.gart_size / MB(1); + hp_zx1_sizes[0].size = size; + agp_bridge.current_size = (void *) &hp_zx1_sizes[0]; + return size; +} + +static int hp_zx1_configure(void) +{ + struct _hp_private *hp = &hp_private; + + agp_bridge.gart_bus_addr = hp->gart_base; + agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP); + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); + + if (hp->io_pdir_owner) { + OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir)); + OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps); + OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); + OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1); + OUTREG64(hp->registers, HP_ZX1_PCOM, hp->iova_base | log2(HP_ZX1_IOVA_SIZE)); + INREG64(hp->registers, HP_ZX1_PCOM); + } + + return 0; +} + +static void hp_zx1_cleanup(void) +{ + struct _hp_private *hp = &hp_private; + + if (hp->io_pdir_owner) + OUTREG64(hp->registers, HP_ZX1_IBASE, 0); + iounmap((void *) hp->registers); +} + +static void hp_zx1_tlbflush(agp_memory * mem) +{ + struct _hp_private *hp = &hp_private; + + OUTREG64(hp->registers, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size)); + INREG64(hp->registers, HP_ZX1_PCOM); +} + +static int hp_zx1_create_gatt_table(void) +{ + struct _hp_private *hp = &hp_private; + int i; + + if (hp->io_pdir_owner) { + hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, + get_order(hp->io_pdir_size)); + if (!hp->io_pdir) { + printk(KERN_ERR PFX "Couldn't allocate contiguous memory for I/O PDIR\n"); + hp->gatt = 0; + hp->gatt_entries = 0; + return -ENOMEM; + } + memset(hp->io_pdir, 0, hp->io_pdir_size); + + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + } + + for (i = 0; 
i < hp->gatt_entries; i++) { + hp->gatt[i] = (unsigned long) agp_bridge.scratch_page; + } + + return 0; +} + +static int hp_zx1_free_gatt_table(void) +{ + struct _hp_private *hp = &hp_private; + + if (hp->io_pdir_owner) + free_pages((unsigned long) hp->io_pdir, + get_order(hp->io_pdir_size)); + else + hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; + return 0; +} + +static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, k; + off_t j, io_pg_start; + int io_pg_count; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + if ((io_pg_start + io_pg_count) > hp->gatt_entries) { + return -EINVAL; + } + + j = io_pg_start; + while (j < (io_pg_start + io_pg_count)) { + if (hp->gatt[j]) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = io_pg_start; i < mem->page_count; i++) { + unsigned long paddr; + + paddr = mem->memory[i]; + for (k = 0; + k < hp->io_pages_per_kpage; + k++, j++, paddr += hp->io_page_size) { + hp->gatt[j] = agp_bridge.mask_memory(paddr, type); + } + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, io_pg_start, io_pg_count; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { + hp->gatt[i] = agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static unsigned long hp_zx1_mask_memory(unsigned long addr, int type) +{ + return HP_ZX1_PDIR_VALID_BIT | addr; +} + +static unsigned long hp_zx1_unmask_memory(unsigned long addr) +{ + return addr & ~(HP_ZX1_PDIR_VALID_BIT); 
+} + +static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused))) +{ + agp_bridge.masks = hp_zx1_masks; + agp_bridge.dev_private_data = NULL; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = hp_zx1_configure; + agp_bridge.fetch_size = hp_zx1_fetch_size; + agp_bridge.cleanup = hp_zx1_cleanup; + agp_bridge.tlb_flush = hp_zx1_tlbflush; + agp_bridge.mask_memory = hp_zx1_mask_memory; + agp_bridge.unmask_memory = hp_zx1_unmask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; + agp_bridge.free_gatt_table = hp_zx1_free_gatt_table; + agp_bridge.insert_memory = hp_zx1_insert_memory; + agp_bridge.remove_memory = hp_zx1_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + + return hp_zx1_ioc_init(); +} + +#endif /* CONFIG_AGP_HP_ZX1 */ /* per-chipset initialization data. 
* note -- all chipsets for a single vendor MUST be grouped together @@ -3452,7 +4388,7 @@ "Ali", "M1541", ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1621_0, + { PCI_DEVICE_ID_AL_M1621_0, PCI_VENDOR_ID_AL, ALI_M1621, "Ali", @@ -3487,7 +4423,7 @@ ALI_M1651, "Ali", "M1651", - ali_generic_setup }, + ali_generic_setup }, { 0, PCI_VENDOR_ID_AL, ALI_GENERIC, @@ -3599,6 +4535,15 @@ #endif /* CONFIG_AGP_INTEL */ +#ifdef CONFIG_AGP_I460 + { PCI_DEVICE_ID_INTEL_460GX, + PCI_VENDOR_ID_INTEL, + INTEL_460GX, + "Intel", + "460GX", + intel_i460_setup }, +#endif + #ifdef CONFIG_AGP_SIS { PCI_DEVICE_ID_SI_740, PCI_VENDOR_ID_SI, @@ -3654,7 +4599,7 @@ "SiS", "530", sis_generic_setup }, - { PCI_DEVICE_ID_SI_550, + { PCI_DEVICE_ID_SI_550, PCI_VENDOR_ID_SI, SIS_GENERIC, "SiS", @@ -3719,6 +4664,15 @@ via_generic_setup }, #endif /* CONFIG_AGP_VIA */ +#ifdef CONFIG_AGP_HP_ZX1 + { PCI_DEVICE_ID_HP_ZX1_LBA, + PCI_VENDOR_ID_HP, + HP_ZX1, + "HP", + "ZX1", + hp_zx1_setup }, +#endif + { 0, }, /* dummy final entry, always present */ }; @@ -3727,7 +4681,7 @@ static int __init agp_lookup_host_bridge (struct pci_dev *pdev) { int i; - + for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) if (pdev->vendor == agp_bridge_info[i].vendor_id) break; @@ -3775,7 +4729,7 @@ agp_bridge.type = agp_bridge_info[i].chipset; return agp_bridge_info[i].chipset_setup (pdev); } - + i++; } @@ -3797,6 +4751,18 @@ return -ENODEV; } +static int agp_check_supported_device(struct pci_dev *dev) { + + int i; + + for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) { + if (dev->vendor == agp_bridge_info[i].vendor_id && + dev->device == agp_bridge_info[i].device_id) + return 1; + } + + return 0; +} /* Supported Device Scanning routine */ @@ -3805,8 +4771,14 @@ struct pci_dev *dev = NULL; u8 cap_ptr = 0x00; - if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) - return -ENODEV; + /* + * Some systems have multiple host bridges (i.e. BigSur), so + * we can't just use the first one we find. 
+ */ + do { + if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev)) == NULL) + return -ENODEV; + } while(!agp_check_supported_device(dev)); agp_bridge.dev = dev; @@ -3884,12 +4856,12 @@ case PCI_DEVICE_ID_INTEL_830_M_0: i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_830_M_1, - NULL); - if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + PCI_DEVICE_ID_INTEL_830_M_1, + NULL); + if (i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_830_M_1, - i810_dev); + PCI_DEVICE_ID_INTEL_830_M_1, + i810_dev); } if (i810_dev == NULL) { @@ -3913,9 +4885,8 @@ if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) { struct pci_dev *bridge_dev; - bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, - PCI_DEVFN(0, 1)); - if(bridge_dev == NULL) { + bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, PCI_DEVFN(0, 1)); + if (bridge_dev == NULL) { printk(KERN_INFO PFX "agpgart: Detected a Serverworks " "Chipset, but could not find the secondary " "device.\n"); @@ -3933,7 +4904,7 @@ return serverworks_setup(bridge_dev); default: - if(agp_try_unsupported) { + if (agp_try_unsupported) { agp_bridge.type = SVWRKS_GENERIC; return serverworks_setup(bridge_dev); } @@ -3943,6 +4914,23 @@ #endif /* CONFIG_AGP_SWORKS */ +#ifdef CONFIG_AGP_HP_ZX1 + if (dev->vendor == PCI_VENDOR_ID_HP) { + do { + /* ZX1 LBAs can be either PCI or AGP bridges */ + if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { + printk(KERN_INFO PFX "Detected HP ZX1 AGP " + "chipset at %s\n", dev->slot_name); + agp_bridge.type = HP_ZX1; + agp_bridge.dev = dev; + return hp_zx1_setup(dev); + } + dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev); + } while (dev); + return -ENODEV; + } +#endif /* CONFIG_AGP_HP_ZX1 */ + /* find capndx */ cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); if (cap_ptr == 0x00) @@ -4052,7 +5040,7 @@ goto err_out; } got_gatt = 1; - + agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); if (agp_bridge.key_list == 
NULL) { printk(KERN_ERR PFX "error allocating memory for key lists.\n"); @@ -4060,7 +5048,7 @@ goto err_out; } got_keylist = 1; - + /* FIXME vmalloc'd memory not guaranteed contiguous */ memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); @@ -4105,14 +5093,13 @@ static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data) { - switch(rq) - { + switch(rq) { case PM_SUSPEND: return agp_bridge.suspend(); case PM_RESUME: agp_bridge.resume(); return 0; - } + } return 0; } @@ -4150,7 +5137,7 @@ } inter_module_register("drm_agp", THIS_MODULE, &drm_agp); - + pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power); return 0; } diff -Nru a/drivers/char/drm/ati_pcigart.h b/drivers/char/drm/ati_pcigart.h --- a/drivers/char/drm/ati_pcigart.h Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/ati_pcigart.h Fri Apr 26 00:01:27 2002 @@ -30,14 +30,20 @@ #define __NO_VERSION__ #include "drmP.h" -#if PAGE_SIZE == 8192 +#if PAGE_SIZE == 65536 +# define ATI_PCIGART_TABLE_ORDER 0 +# define ATI_PCIGART_TABLE_PAGES (1 << 0) +#elif PAGE_SIZE == 16384 +# define ATI_PCIGART_TABLE_ORDER 1 +# define ATI_PCIGART_TABLE_PAGES (1 << 1) +#elif PAGE_SIZE == 8192 # define ATI_PCIGART_TABLE_ORDER 2 # define ATI_PCIGART_TABLE_PAGES (1 << 2) #elif PAGE_SIZE == 4096 # define ATI_PCIGART_TABLE_ORDER 3 # define ATI_PCIGART_TABLE_PAGES (1 << 3) #else -# error - PAGE_SIZE not 8K or 4K +# error - PAGE_SIZE not 64K, 16K, 8K or 4K #endif # define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */ @@ -103,6 +109,7 @@ goto done; } +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) if ( !dev->pdev ) { DRM_ERROR( "PCI device unknown!\n" ); goto done; @@ -117,6 +124,9 @@ address = 0; goto done; } +#else + bus_address = virt_to_bus( (void *)address ); +#endif pci_gart = (u32 *)address; @@ -126,6 +136,7 @@ memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) ); for ( i = 0 ; i < pages ; i++ ) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) /* we need to support large memory 
configurations */ entry->busaddr[i] = pci_map_single(dev->pdev, page_address( entry->pagelist[i] ), @@ -139,7 +150,9 @@ goto done; } page_base = (u32) entry->busaddr[i]; - +#else + page_base = page_to_bus( entry->pagelist[i] ); +#endif for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { *pci_gart++ = cpu_to_le32( page_base ); page_base += ATI_PCIGART_PAGE_SIZE; @@ -164,6 +177,7 @@ unsigned long addr, dma_addr_t bus_addr) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) drm_sg_mem_t *entry = dev->sg; unsigned long pages; int i; @@ -188,6 +202,8 @@ PAGE_SIZE, PCI_DMA_TODEVICE); } } + +#endif if ( addr ) { DRM(ati_free_pcigart_table)( addr ); diff -Nru a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h --- a/drivers/char/drm/drmP.h Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/drmP.h Fri Apr 26 00:01:27 2002 @@ -366,13 +366,13 @@ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } /* Mapping helper macros */ -#define DRM_IOREMAP(map) \ - (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP(map, dev) \ + (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) ) -#define DRM_IOREMAPFREE(map) \ +#define DRM_IOREMAPFREE(map, dev) \ do { \ if ( (map)->handle && (map)->size ) \ - DRM(ioremapfree)( (map)->handle, (map)->size ); \ + DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \ } while (0) #define DRM_FIND_MAP(_map, _o) \ @@ -826,8 +826,8 @@ extern unsigned long DRM(alloc_pages)(int order, int area); extern void DRM(free_pages)(unsigned long address, int order, int area); -extern void *DRM(ioremap)(unsigned long offset, unsigned long size); -extern void DRM(ioremapfree)(void *pt, unsigned long size); +extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev); +extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev); #if __REALLY_HAVE_AGP extern agp_memory *DRM(alloc_agp)(int pages, u32 type); diff -Nru a/drivers/char/drm/drm_agpsupport.h 
b/drivers/char/drm/drm_agpsupport.h --- a/drivers/char/drm/drm_agpsupport.h Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/drm_agpsupport.h Fri Apr 26 00:01:26 2002 @@ -277,6 +277,7 @@ case INTEL_I840: head->chipset = "Intel i840"; break; case INTEL_I845: head->chipset = "Intel i845"; break; case INTEL_I850: head->chipset = "Intel i850"; break; + case INTEL_460GX: head->chipset = "Intel 460GX"; break; #endif case VIA_GENERIC: head->chipset = "VIA"; break; @@ -315,6 +316,8 @@ case SVWRKS_GENERIC: head->chipset = "Serverworks Generic"; break; #endif + + case HP_ZX1: head->chipset = "HP ZX1"; break; default: head->chipset = "Unknown"; break; } diff -Nru a/drivers/char/drm/drm_bufs.h b/drivers/char/drm/drm_bufs.h --- a/drivers/char/drm/drm_bufs.h Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/drm_bufs.h Fri Apr 26 00:01:26 2002 @@ -107,7 +107,7 @@ switch ( map->type ) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) if ( map->offset + map->size < map->offset || map->offset < virt_to_phys(high_memory) ) { DRM(free)( map, sizeof(*map), DRM_MEM_MAPS ); @@ -124,7 +124,7 @@ MTRR_TYPE_WRCOMB, 1 ); } #endif - map->handle = DRM(ioremap)( map->offset, map->size ); + map->handle = DRM(ioremap)( map->offset, map->size, dev ); break; case _DRM_SHM: @@ -249,7 +249,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); diff -Nru a/drivers/char/drm/drm_drv.h b/drivers/char/drm/drm_drv.h --- a/drivers/char/drm/drm_drv.h Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/drm_drv.h Fri Apr 26 00:01:27 2002 @@ -439,7 +439,7 @@ DRM_DEBUG( "mtrr_del=%d\n", retcode ); } #endif - DRM(ioremapfree)( map->handle, map->size ); + DRM(ioremapfree)( map->handle, map->size, dev ); break; case _DRM_SHM: vfree(map->handle); diff -Nru 
a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h --- a/drivers/char/drm/drm_memory.h Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/drm_memory.h Fri Apr 26 00:01:26 2002 @@ -306,9 +306,14 @@ } } -void *DRM(ioremap)(unsigned long offset, unsigned long size) +void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) { void *pt; +#if __REALLY_HAVE_AGP + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; +#endif if (!size) { DRM_MEM_ERROR(DRM_MEM_MAPPINGS, @@ -316,12 +321,51 @@ return NULL; } +#if __REALLY_HAVE_AGP + if(!dev->agp || dev->agp->cant_use_aperture == 0) + goto standard_ioremap; + + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + map = r_list->map; + if (!map) continue; + if (map->offset <= offset && + (map->offset + map->size) >= (offset + size)) + break; + } + + if(map && map->type == _DRM_AGP) { + struct drm_agp_mem *agpmem; + + for(agpmem = dev->agp->memory; agpmem; + agpmem = agpmem->next) { + if(agpmem->bound <= offset && + (agpmem->bound + (agpmem->pages + << PAGE_SHIFT)) >= (offset + size)) + break; + } + + if(agpmem == NULL) + goto ioremap_failure; + + pt = agpmem->memory->vmptr + (offset - agpmem->bound); + goto ioremap_success; + } + +standard_ioremap: +#endif if (!(pt = ioremap(offset, size))) { +#if __REALLY_HAVE_AGP +ioremap_failure: +#endif spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); return NULL; } +#if __REALLY_HAVE_AGP +ioremap_success: +#endif spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; @@ -329,7 +373,7 @@ return pt; } -void DRM(ioremapfree)(void *pt, unsigned long size) +void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) { int alloc_count; int free_count; @@ -337,7 +381,11 @@ if (!pt) DRM_MEM_ERROR(DRM_MEM_MAPPINGS, "Attempt to free NULL pointer\n"); +#if __REALLY_HAVE_AGP + else 
if(!dev->agp || dev->agp->cant_use_aperture == 0) +#else else +#endif iounmap(pt); spin_lock(&DRM(mem_lock)); diff -Nru a/drivers/char/drm/drm_scatter.h b/drivers/char/drm/drm_scatter.h --- a/drivers/char/drm/drm_scatter.h Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/drm_scatter.h Fri Apr 26 00:01:26 2002 @@ -47,9 +47,11 @@ vfree( entry->virtual ); +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) DRM(free)( entry->busaddr, entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); +#endif DRM(free)( entry->pagelist, entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES ); @@ -94,6 +96,7 @@ return -ENOMEM; } +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); if ( !entry->busaddr ) { @@ -106,12 +109,15 @@ return -ENOMEM; } memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) ); +#endif entry->virtual = vmalloc_32( pages << PAGE_SHIFT ); if ( !entry->virtual ) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) DRM(free)( entry->busaddr, entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); +#endif DRM(free)( entry->pagelist, entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES ); diff -Nru a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h --- a/drivers/char/drm/drm_vm.h Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/drm_vm.h Fri Apr 26 00:01:27 2002 @@ -78,7 +78,7 @@ * Find the right map */ - if(!dev->agp->cant_use_aperture) goto vm_nopage_error; + if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error; list_for_each(list, &dev->maplist->head) { r_list = (drm_map_list_t *)list; @@ -244,7 +244,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); @@ -491,15 +491,17 @@ switch (map->type) { case _DRM_AGP: -#if defined(__alpha__) - /* - * On Alpha we can't talk to bus dma address from the - * 
CPU, so for memory of type DRM_AGP, we'll deal with - * sorting out the real physical pages and mappings - * in nopage() - */ - vma->vm_ops = &DRM(vm_ops); - break; +#if __REALLY_HAVE_AGP + if(dev->agp->cant_use_aperture == 1) { + /* + * On some systems we can't talk to bus dma address from + * the CPU, so for memory of type DRM_AGP, we'll deal + * with sorting out the real physical pages and mappings + * in nopage() + */ + vma->vm_ops = &DRM(vm_ops); + goto mapswitch_out; + } #endif /* fall through to _DRM_FRAME_BUFFER... */ case _DRM_FRAME_BUFFER: @@ -511,8 +513,7 @@ pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; } #elif defined(__ia64__) - if (map->type != _DRM_AGP) - vma->vm_page_prot = + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); #elif defined(__powerpc__) pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED; @@ -556,11 +557,14 @@ #else vma->vm_pte = (unsigned long)map; #endif - vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_RESERVED; break; default: return -EINVAL; /* This should never happen. 
*/ } +#if __REALLY_HAVE_AGP +mapswitch_out: +#endif vma->vm_flags |= VM_RESERVED; /* Don't swap */ #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */ diff -Nru a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c --- a/drivers/char/drm/i810_dma.c Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/i810_dma.c Fri Apr 26 00:01:27 2002 @@ -313,7 +313,7 @@ if(dev_priv->ring.virtual_start) { DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, - dev_priv->ring.Size); + dev_priv->ring.Size, dev); } if(dev_priv->hw_status_page != 0UL) { i810_free_page(dev, dev_priv->hw_status_page); @@ -327,7 +327,8 @@ for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total); + DRM(ioremapfree)(buf_priv->kernel_virtual, + buf->total, dev); } } return 0; @@ -400,7 +401,7 @@ *buf_priv->in_use = I810_BUF_FREE; buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address, - buf->total); + buf->total, dev); } return 0; } @@ -456,7 +457,7 @@ dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base + init->ring_start, - init->ring_size); + init->ring_size, dev); if (dev_priv->ring.virtual_start == NULL) { dev->dev_private = (void *) dev_priv; diff -Nru a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c --- a/drivers/char/drm/mga_dma.c Fri Apr 26 00:01:27 2002 +++ b/drivers/char/drm/mga_dma.c Fri Apr 26 00:01:27 2002 @@ -557,9 +557,9 @@ (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + init->sarea_priv_offset); - DRM_IOREMAP( dev_priv->warp ); - DRM_IOREMAP( dev_priv->primary ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->warp, dev ); + DRM_IOREMAP( dev_priv->primary, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->warp->handle || !dev_priv->primary->handle || @@ -647,9 +647,9 @@ if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; - DRM_IOREMAPFREE( dev_priv->warp ); - DRM_IOREMAPFREE( 
dev_priv->primary ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->warp, dev ); + DRM_IOREMAPFREE( dev_priv->primary, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); if ( dev_priv->head != NULL ) { mga_freelist_cleanup( dev ); diff -Nru a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c --- a/drivers/char/drm/r128_cce.c Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/r128_cce.c Fri Apr 26 00:01:26 2002 @@ -216,7 +216,23 @@ int i; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { +#ifndef CONFIG_AGP_I460 if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail ) { +#else + /* + * XXX - this is (I think) a 460GX specific hack + * + * When doing texturing, ring.tail sometimes gets ahead of + * PM4_BUFFER_DL_WPTR by 2; consequently, the card processes + * its whole quota of instructions and *ring.head is still 2 + * short of ring.tail. Work around this for now in lieu of + * a better solution. + */ + if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail + || ( dev->agp->agp_info.chipset == INTEL_460GX + && ( dev_priv->ring.tail - GET_RING_HEAD( &dev_priv->ring ) ) == 2 ) ) + { +#endif int pm4stat = R128_READ( R128_PM4_STAT ); if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size ) && @@ -317,7 +333,7 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev, drm_r128_private_t *dev_priv ) { - u32 ring_start; + u32 ring_start, rptr_addr; u32 tmp; DRM_DEBUG( "%s\n", __FUNCTION__ ); @@ -341,8 +357,28 @@ SET_RING_HEAD( &dev_priv->ring, 0 ); if ( !dev_priv->is_pci ) { - R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, - dev_priv->ring_rptr->offset ); +#ifdef CONFIG_AGP_I460 + /* + * XXX - This is a 460GX specific hack + * + * We have to hack this right now. 460GX isn't claiming PCI + * writes from the card into the AGP aperture. Because of this, + * we have to get space outside of the aperture for RPTR_ADDR. 
+ */ + if (dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off; + + alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA); + atomic_inc(&virt_to_page(alt_rh_off)->count); + set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags); + + dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off; + SET_RING_HEAD( &dev_priv->ring, 0 ); + rptr_addr = __pa( dev_priv->ring.head ); + } else +#endif + rptr_addr = dev_priv->ring_rptr->offset; + R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, rptr_addr ); } else { drm_sg_mem_t *entry = dev->sg; unsigned long tmp_ofs, page_ofs; @@ -350,11 +386,20 @@ tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle; page_ofs = tmp_ofs >> PAGE_SHIFT; +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", entry->busaddr[page_ofs], entry->handle + tmp_ofs ); +#else + R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, + page_to_bus(entry->pagelist[page_ofs])); + + DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n", + page_to_bus(entry->pagelist[page_ofs]), + entry->handle + tmp_ofs ); +#endif } /* Set watermark control */ @@ -550,9 +595,9 @@ init->sarea_priv_offset); if ( !dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cce_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cce_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -624,9 +669,9 @@ drm_r128_private_t *dev_priv = dev->dev_private; if ( !dev_priv->is_pci ) { - DRM_IOREMAPFREE( dev_priv->cce_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cce_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); } else { if (!DRM(ati_pcigart_cleanup)( dev, 
dev_priv->phys_pci_gart, @@ -634,6 +679,21 @@ DRM_ERROR( "failed to cleanup PCI GART!\n" ); } +#if defined(CONFIG_AGP_I460) && defined(__ia64__) + /* + * Free the page we grabbed for RPTR_ADDR + */ + if( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off = + (unsigned long) dev_priv->ring.head; + + atomic_dec(&virt_to_page(alt_rh_off)->count); + clear_bit(PG_locked, &virt_to_page(alt_rh_off)->flags); + wake_up(&virt_to_page(alt_rh_off)->wait); + free_page(alt_rh_off); + } +#endif + DRM(free)( dev->dev_private, sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); dev->dev_private = NULL; diff -Nru a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c --- a/drivers/char/drm/radeon_cp.c Fri Apr 26 00:01:26 2002 +++ b/drivers/char/drm/radeon_cp.c Fri Apr 26 00:01:26 2002 @@ -575,7 +575,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, drm_radeon_private_t *dev_priv ) { - u32 ring_start, cur_read_ptr; + u32 ring_start, cur_read_ptr, rptr_addr; u32 tmp; /* Initialize the memory controller */ @@ -612,8 +612,28 @@ dev_priv->ring.tail = cur_read_ptr; if ( !dev_priv->is_pci ) { - RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, - dev_priv->ring_rptr->offset ); +#ifdef CONFIG_AGP_I460 + /* + * XXX - This is a 460GX specific hack + * + * We have to hack this right now. 460GX isn't claiming PCI + * writes from the card into the AGP aperture. Because of this, + * we have to get space outside of the aperture for RPTR_ADDR. 
+ */ + if( dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off; + + alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA); + atomic_inc(&virt_to_page(alt_rh_off)->count); + set_bit(PG_locked, &virt_to_page(alt_rh_off)->flags); + + dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off; + *dev_priv->ring.head = cur_read_ptr; + rptr_addr = __pa( dev_priv->ring.head ); + } else +#endif + rptr_addr = dev_priv->ring_rptr->offset; + RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, rptr_addr ); } else { drm_sg_mem_t *entry = dev->sg; unsigned long tmp_ofs, page_ofs; @@ -621,11 +641,19 @@ tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle; page_ofs = tmp_ofs >> PAGE_SHIFT; +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) + RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, + entry->busaddr[page_ofs]); + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], + entry->handle + tmp_ofs ); +#else RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", entry->busaddr[page_ofs], entry->handle + tmp_ofs ); +#endif } /* Set ring buffer size */ @@ -836,9 +864,9 @@ init->sarea_priv_offset); if ( !dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cp_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cp_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -983,9 +1011,9 @@ drm_radeon_private_t *dev_priv = dev->dev_private; if ( !dev_priv->is_pci ) { - DRM_IOREMAPFREE( dev_priv->cp_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cp_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); } else { if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, @@ -993,8 +1021,20 @@ 
DRM_ERROR( "failed to cleanup PCI GART!\n" ); } - DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), - DRM_MEM_DRIVER ); +#ifdef CONFIG_AGP_I460 + /* + * Free the page we grabbed for RPTR_ADDR + */ + if( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off = (unsigned long) dev_priv->ring.head; + + atomic_dec(&virt_to_page(alt_rh_off)->count); + clear_bit(PG_locked, &virt_to_page(alt_rh_off)->flags); + wake_up(&virt_to_page(alt_rh_off)->wait); + free_page(alt_rh_off); + } +#endif + DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); dev->dev_private = NULL; } diff -Nru a/drivers/char/mem.c b/drivers/char/mem.c --- a/drivers/char/mem.c Fri Apr 26 00:01:27 2002 +++ b/drivers/char/mem.c Fri Apr 26 00:01:27 2002 @@ -518,6 +518,7 @@ default: ret = -EINVAL; } + force_successful_syscall_return(); unlock_kernel(); return ret; } diff -Nru a/drivers/char/pc_keyb.c b/drivers/char/pc_keyb.c --- a/drivers/char/pc_keyb.c Fri Apr 26 00:01:27 2002 +++ b/drivers/char/pc_keyb.c Fri Apr 26 00:01:27 2002 @@ -808,6 +808,17 @@ { int status; +#ifdef CONFIG_IA64 + /* + * This is not really IA-64 specific. Probably ought to be done on all platforms + * that are (potentially) legacy-free. + */ + if (kbd_read_status() == 0xff && kbd_read_input() == 0xff) { + kbd_exists = 0; + return "No keyboard controller present"; + } +#endif + /* * Test the keyboard interface. * This seems to be the only way to get it going. 
@@ -910,6 +921,10 @@ char *msg = initialize_kbd(); if (msg) printk(KERN_WARNING "initialize_kbd: %s\n", msg); +#ifdef CONFIG_IA64 + if (!kbd_exists) + return; +#endif } #if defined CONFIG_PSMOUSE diff -Nru a/drivers/ide/ide-geometry.c b/drivers/ide/ide-geometry.c --- a/drivers/ide/ide-geometry.c Fri Apr 26 00:01:26 2002 +++ b/drivers/ide/ide-geometry.c Fri Apr 26 00:01:26 2002 @@ -9,8 +9,11 @@ #include #include #include -#include #include + +#ifndef CONFIG_IA64 +# include +#endif #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) diff -Nru a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile --- a/drivers/media/radio/Makefile Fri Apr 26 00:01:26 2002 +++ b/drivers/media/radio/Makefile Fri Apr 26 00:01:26 2002 @@ -4,7 +4,7 @@ # Object file lists. -obj-y := +obj-y := dummy.o obj-m := obj-n := obj- := diff -Nru a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/radio/dummy.c Fri Apr 26 00:01:27 2002 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's dealing with... */ diff -Nru a/drivers/media/video/Makefile b/drivers/media/video/Makefile --- a/drivers/media/video/Makefile Fri Apr 26 00:01:27 2002 +++ b/drivers/media/video/Makefile Fri Apr 26 00:01:27 2002 @@ -4,7 +4,7 @@ # Object file lists. -obj-y := +obj-y := dummy.o obj-m := obj-n := obj- := diff -Nru a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/video/dummy.c Fri Apr 26 00:01:27 2002 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's dealing with... */ diff -Nru a/drivers/message/fusion/isense.c b/drivers/message/fusion/isense.c --- a/drivers/message/fusion/isense.c Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/isense.c Fri Apr 26 00:01:27 2002 @@ -5,12 +5,13 @@ * Error Report logging output. This module implements SCSI-3 * Opcode lookup and a sorted table of SCSI-3 ASC/ASCQ strings. 
* - * Copyright (c) 1991-2001 Steven J. Ralston + * Copyright (c) 1991-2002 Steven J. Ralston * Written By: Steven J. Ralston * (yes I wrote some of the orig. code back in 1991!) - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: isense.c,v 1.28.14.1 2001/08/24 20:07:04 sralston Exp $ + * $Id: isense.c,v 1.33 2002/02/27 18:44:19 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -49,11 +50,15 @@ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -#include +#include #include +#include #include #include -#include +#include +#if defined (__sparc__) +#include +#endif /* Hmmm, avoid undefined spinlock_t on lk-2.2.14-5.0 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) @@ -61,7 +66,7 @@ #endif #define MODULEAUTHOR "Steven J. Ralston" -#define COPYRIGHT "Copyright (c) 2001 " MODULEAUTHOR +#define COPYRIGHT "Copyright (c) 2001-2002 " MODULEAUTHOR #include "mptbase.h" #include "isense.h" @@ -87,7 +92,6 @@ EXPORT_NO_SYMBOLS; MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); -MODULE_LICENSE("GPL"); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ int __init isense_init(void) diff -Nru a/drivers/message/fusion/lsi/fc_log.h b/drivers/message/fusion/lsi/fc_log.h --- a/drivers/message/fusion/lsi/fc_log.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/fc_log.h Fri Apr 26 00:01:26 2002 @@ -7,7 +7,7 @@ * in the IOCLogInfo field of a MPI Default Reply Message. 
* * CREATION DATE: 6/02/2000 - * ID: $Id: fc_log.h,v 4.5 2001/06/07 19:18:00 sschremm Exp $ + * ID: $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $ */ @@ -62,7 +62,7 @@ MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP = 0x2100000a, /* Manual Response not sent due to a LIP */ MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3 = 0x2100000b, /* not sent because remote node does not support Class 3 */ MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID = 0x2100000c, /* not sent because login to remote node not validated */ - MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound after a logout */ + MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound queue after a logout */ MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN = 0x2100000f, /* cleared waiting for data after a logout */ MPI_IOCLOGINFO_FC_LAN_BASE = 0x22000000, diff -Nru a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h --- a/drivers/message/fusion/lsi/mpi.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI Message independent structures and definitions * Creation Date: July 27, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.03 * * Version History * --------------- @@ -39,6 +39,11 @@ * Added function codes for RAID. * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE, * MPI_DOORBELL_USED, to better match the spec. + * 08-08-01 01.02.01 Original release for v1.2 work. + * Changed MPI_VERSION_MINOR from 0x01 to 0x02. + * Added define MPI_FUNCTION_TOOLBOX. + * 09-28-01 01.02.02 New function code MPI_SCSI_ENCLOSURE_PROCESSOR. + * 11-01-01 01.02.03 Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR. 
* -------------------------------------------------------------------------- */ @@ -53,7 +58,7 @@ *****************************************************************************/ #define MPI_VERSION_MAJOR (0x01) -#define MPI_VERSION_MINOR (0x01) +#define MPI_VERSION_MINOR (0x02) #define MPI_VERSION ((MPI_VERSION_MAJOR << 8) | MPI_VERSION_MINOR) /* Note: The major versions of 0xe0 through 0xff are reserved */ @@ -216,8 +221,12 @@ #define MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND (0x13) #define MPI_FUNCTION_FC_PRIMITIVE_SEND (0x14) -#define MPI_FUNCTION_RAID_VOLUME (0x15) +#define MPI_FUNCTION_RAID_ACTION (0x15) #define MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) + +#define MPI_FUNCTION_TOOLBOX (0x17) + +#define MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) #define MPI_FUNCTION_LAN_SEND (0x20) #define MPI_FUNCTION_LAN_RECEIVE (0x21) diff -Nru a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h --- a/drivers/message/fusion/lsi/mpi_cnfg.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi_cnfg.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI Config message, structures, and Pages * Creation Date: July 27, 2000 * - * MPI Version: 01.01.11 + * MPI Version: 01.02.05 * * Version History * --------------- @@ -72,6 +72,42 @@ * Added IO Unit Page 3. * Modified defines for Scsi Port Page 2. * Modified RAID Volume Pages. + * 08-08-01 01.02.01 Original release for v1.2 work. + * Added SepID and SepBus to RVP2 IMPhysicalDisk struct. + * Added defines for the SEP bits in RVP2 VolumeSettings. + * Modified the DeviceSettings field in RVP2 to use the + * proper structure. + * Added defines for SES, SAF-TE, and cross channel for + * IOCPage2 CapabilitiesFlags. + * Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE. + * Removed define for + * MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE. + * Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT. + * 08-29-01 01.02.02 Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035. 
+ * Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY + * and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY. + * Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS, + * MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS, and + * MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and + * MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED. + * Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED + * and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED. + * Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1. + * Added rejected bits to SCSI Device Page 0 Information. + * Increased size of ALPA array in FC Port Page 2 by one + * and removed a one byte reserved field. + * 09-28-01 01.02.03 Swapped NegWireSpeedLow and NegWireSpeedHigh in + * CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering. + * Added structures for Manufacturing Page 4, IO Unit + * Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and + * RAID PhysDisk Page 0. + * 10-04-01 01.02.04 Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK. + * Modified some of the new defines to make them 32 + * character unique. + * Modified how variable length pages (arrays) are defined. + * Added generic defines for hot spare pools and RAID + * volume types. + * 11-01-01 01.02.05 Added define for MPI_IOUNITPAGE1_DISABLE_IR. 
* -------------------------------------------------------------------------- */ @@ -104,12 +140,13 @@ fCONFIG_PAGE_HEADER_UNION, MPI_POINTER PTR_CONFIG_PAGE_HEADER_UNION; -/****************************************************************************/ -/* PageType field values */ -/****************************************************************************/ +/**************************************************************************** +* PageType field values +****************************************************************************/ #define MPI_CONFIG_PAGEATTR_READ_ONLY (0x00) #define MPI_CONFIG_PAGEATTR_CHANGEABLE (0x10) #define MPI_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI_CONFIG_PAGEATTR_RO_PERSISTENT (0x30) #define MPI_CONFIG_PAGEATTR_MASK (0xF0) #define MPI_CONFIG_PAGETYPE_IO_UNIT (0x00) @@ -122,29 +159,21 @@ #define MPI_CONFIG_PAGETYPE_LAN (0x07) #define MPI_CONFIG_PAGETYPE_RAID_VOLUME (0x08) #define MPI_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) #define MPI_CONFIG_PAGETYPE_MASK (0x0F) #define MPI_CONFIG_TYPENUM_MASK (0x0FFF) /**************************************************************************** - * PageAddres field values - ****************************************************************************/ +* PageAddress field values +****************************************************************************/ #define MPI_SCSI_PORT_PGAD_PORT_MASK (0x000000FF) -#define MPI_SCSI_DEVICE_FORM_MASK (0xF0000000) -#define MPI_SCSI_DEVICE_FORM_TARGETID (0x00000000) -#define MPI_SCSI_DEVICE_FORM_RAID_PHYS_DEV_NUM (0x10000000) #define MPI_SCSI_DEVICE_TARGET_ID_MASK (0x000000FF) #define MPI_SCSI_DEVICE_TARGET_ID_SHIFT (0) #define MPI_SCSI_DEVICE_BUS_MASK (0x0000FF00) #define MPI_SCSI_DEVICE_BUS_SHIFT (8) -#define MPI_SCSI_DEVICE_VOLUME_TARG_ID_MASK (0x000000FF) -#define MPI_SCSI_DEVICE_VOLUME_TARG_ID_SHIFT (0) -#define MPI_SCSI_DEVICE_VOLUME_BUS_MASK (0x0000FF00) -#define MPI_SCSI_DEVICE_VOLUME_BUS_SHIFT (8) 
-#define MPI_SCSI_DEVICE_PHYS_DISK_NUM_MASK (0x00FF0000) -#define MPI_SCSI_DEVICE_PHYS_DISK_NUM_SHIFT (16) #define MPI_FC_PORT_PGAD_PORT_MASK (0xF0000000) #define MPI_FC_PORT_PGAD_PORT_SHIFT (28) @@ -167,10 +196,14 @@ #define MPI_FC_DEVICE_PGAD_BT_TID_MASK (0x000000FF) #define MPI_FC_DEVICE_PGAD_BT_TID_SHIFT (0) +#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT (0) + -/****************************************************************************/ -/* Config Request Message */ -/****************************************************************************/ + +/**************************************************************************** +* Config Request Message +****************************************************************************/ typedef struct _MSG_CONFIG { U8 Action; /* 00h */ @@ -181,16 +214,16 @@ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U8 Reserved2[8]; /* 0Ch */ - fCONFIG_PAGE_HEADER Header; /* 14h */ + fCONFIG_PAGE_HEADER Header; /* 14h */ U32 PageAddress; /* 18h */ SGE_IO_UNION PageBufferSGE; /* 1Ch */ } MSG_CONFIG, MPI_POINTER PTR_MSG_CONFIG, Config_t, MPI_POINTER pConfig_t; -/****************************************************************************/ -/* Action field values */ -/****************************************************************************/ +/**************************************************************************** +* Action field values +****************************************************************************/ #define MPI_CONFIG_ACTION_PAGE_HEADER (0x00) #define MPI_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) #define MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) @@ -213,7 +246,7 @@ U8 Reserved2[2]; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ - fCONFIG_PAGE_HEADER Header; /* 14h */ + fCONFIG_PAGE_HEADER Header; /* 14h */ } MSG_CONFIG_REPLY, MPI_POINTER PTR_MSG_CONFIG_REPLY, ConfigReply_t, MPI_POINTER pConfigReply_t; @@ -225,19 +258,24 @@ * 
*****************************************************************************/ -/****************************************************************************/ -/* Manufacturing Config pages */ -/****************************************************************************/ +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ #define MPI_MANUFACTPAGE_DEVICEID_FC909 (0x0621) #define MPI_MANUFACTPAGE_DEVICEID_FC919 (0x0624) #define MPI_MANUFACTPAGE_DEVICEID_FC929 (0x0622) +#define MPI_MANUFACTPAGE_DEVICEID_FC919X (0x0628) +#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626) #define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030) #define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031) -#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0035) +#define MPI_MANUFACTPAGE_DEVID_1030_53C1035 (0x0032) +#define MPI_MANUFACTPAGE_DEVID_1030ZC_53C1035 (0x0033) +#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0040) +#define MPI_MANUFACTPAGE_DEVID_53C1035ZC (0x0041) typedef struct _CONFIG_PAGE_MANUFACTURING_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 ChipName[16]; /* 04h */ U8 ChipRevision[8]; /* 14h */ U8 BoardName[16]; /* 1Ch */ @@ -252,7 +290,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 VPD[256]; /* 04h */ } fCONFIG_PAGE_MANUFACTURING_1, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_1, ManufacturingPage1_t, MPI_POINTER pManufacturingPage1_t; @@ -269,35 +307,72 @@ MpiChipRevisionId_t, MPI_POINTER pMpiChipRevisionId_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + typedef struct _CONFIG_PAGE_MANUFACTURING_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - MPI_CHIP_REVISION_ID ChipId; /* 04h */ - U32 HwSettings[1]; /* 08h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + MPI_CHIP_REVISION_ID ChipId; /* 04h */ + U32 HwSettings[MPI_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 08h */ } fCONFIG_PAGE_MANUFACTURING_2, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_2, ManufacturingPage2_t, MPI_POINTER pManufacturingPage2_t; #define MPI_MANUFACTURING2_PAGEVERSION (0x00) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_MAN_PAGE_3_INFO_WORDS +#define MPI_MAN_PAGE_3_INFO_WORDS (1) +#endif + typedef struct _CONFIG_PAGE_MANUFACTURING_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - MPI_CHIP_REVISION_ID ChipId; /* 04h */ - U32 Info[1]; /* 08h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + MPI_CHIP_REVISION_ID ChipId; /* 04h */ + U32 Info[MPI_MAN_PAGE_3_INFO_WORDS];/* 08h */ } fCONFIG_PAGE_MANUFACTURING_3, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_3, ManufacturingPage3_t, MPI_POINTER pManufacturingPage3_t; #define MPI_MANUFACTURING3_PAGEVERSION (0x00) -/****************************************************************************/ -/* IO Unit Config Pages */ -/****************************************************************************/ +typedef struct _CONFIG_PAGE_MANUFACTURING_4 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U32 Reserved1; /* 04h */ + U8 InfoOffset0; /* 08h */ + U8 InfoSize0; /* 09h */ + U8 InfoOffset1; /* 0Ah */ + U8 InfoSize1; /* 0Bh */ + U8 InquirySize; /* 0Ch */ + U8 Reserved2; /* 0Dh */ + U16 Reserved3; /* 0Eh */ + U8 InquiryData[56]; /* 10h */ + U32 ISVolumeSettings; /* 48h */ + U32 IMEVolumeSettings; /* 4Ch */ + U32 IMVolumeSettings; /* 50h */ +} fCONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4, + ManufacturingPage4_t, 
MPI_POINTER pManufacturingPage4_t; + +#define MPI_MANUFACTURING4_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_IO_UNIT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U64 UniqueValue; /* 04h */ } fCONFIG_PAGE_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_0, IOUnitPage0_t, MPI_POINTER pIOUnitPage0_t; @@ -307,18 +382,20 @@ typedef struct _CONFIG_PAGE_IO_UNIT_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ } fCONFIG_PAGE_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_1, IOUnitPage1_t, MPI_POINTER pIOUnitPage1_t; #define MPI_IOUNITPAGE1_PAGEVERSION (0x00) +/* IO Unit Page 1 Flags defines */ + #define MPI_IOUNITPAGE1_MULTI_FUNCTION (0x00000000) #define MPI_IOUNITPAGE1_SINGLE_FUNCTION (0x00000001) #define MPI_IOUNITPAGE1_MULTI_PATHING (0x00000002) #define MPI_IOUNITPAGE1_SINGLE_PATHING (0x00000000) - +#define MPI_IOUNITPAGE1_DISABLE_IR (0x00000040) #define MPI_IOUNITPAGE1_FORCE_32 (0x00000080) @@ -335,7 +412,7 @@ typedef struct _CONFIG_PAGE_IO_UNIT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U32 BiosVersion; /* 08h */ MPI_ADAPTER_INFO AdapterOrder[4]; /* 0Ch */ @@ -344,38 +421,45 @@ #define MPI_IOUNITPAGE2_PAGEVERSION (0x00) -#define MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE (0x00000001) #define MPI_IOUNITPAGE2_FLAGS_PAUSE_ON_ERROR (0x00000002) #define MPI_IOUNITPAGE2_FLAGS_VERBOSE_ENABLE (0x00000004) #define MPI_IOUNITPAGE2_FLAGS_COLOR_VIDEO_DISABLE (0x00000008) #define MPI_IOUNITPAGE2_FLAGS_DONT_HOOK_INT_40 (0x00000010) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#endif + typedef struct _CONFIG_PAGE_IO_UNIT_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 VolumeSettings; /* 04h */ - U8 InfoOffset0; /* 08h */ - U8 InfoSize0; /* 09h */ - U8 InfoOffset1; /* 0Ah */ - U8 InfoSize1; /* 0Bh */ - U8 InquirySize; /* 0Ch */ - U8 Reserved; /* 0Dh */ - U16 Reserved2; /* 0Eh */ - U8 InquiryData[56]; /* 10h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 GPIOCount; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ + U16 GPIOVal[MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX]; /* 08h */ } fCONFIG_PAGE_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_3, IOUnitPage3_t, MPI_POINTER pIOUnitPage3_t; -#define MPI_IOUNITPAGE3_PAGEVERSION (0x00) +#define MPI_IOUNITPAGE3_PAGEVERSION (0x01) + +#define MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFC) +#define MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI_IOUNITPAGE3_GPIO_SETTING_OFF (0x00) +#define MPI_IOUNITPAGE3_GPIO_SETTING_ON (0x01) -/****************************************************************************/ -/* IOC Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_IOC_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 TotalNVStore; /* 04h */ U32 FreeNVStore; /* 08h */ U16 VendorID; /* 0Ch */ @@ -393,7 +477,7 @@ typedef struct _CONFIG_PAGE_IOC_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U32 CoalescingTimeout; /* 08h */ U8 CoalescingDepth; /* 0Ch */ @@ -408,53 +492,120 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL { - U8 VolumeTargetID; /* 00h */ - U8 VolumeBus; /* 01h */ - U16 Reserved; /* 02h */ - U8 VolumeVersionMinor; /* 04h */ - U8 VolumeVersionMajor; /* 05h 
*/ - U8 VolumeRaidType; /* 06h */ - U8 Reserved1; /* 07h */ + U8 VolumeID; /* 00h */ + U8 VolumeBus; /* 01h */ + U8 VolumeIOC; /* 02h */ + U8 VolumePageNumber; /* 03h */ + U8 VolumeType; /* 04h */ + U8 Reserved2; /* 05h */ + U16 Reserved3; /* 06h */ } fCONFIG_PAGE_IOC_2_RAID_VOL, MPI_POINTER PTR_CONFIG_PAGE_IOC_2_RAID_VOL, ConfigPageIoc2RaidVol_t, MPI_POINTER pConfigPageIoc2RaidVol_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX +#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1) +#endif + typedef struct _CONFIG_PAGE_IOC_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 CapabilitiesFlags; /* 04h */ - U8 NumActiveVolumes; /* 08h */ - U8 MaxVolumes; /* 09h */ - U16 Reserved; /* 0Ah */ - fCONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[1]; /* 0Ch */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U32 CapabilitiesFlags; /* 04h */ + U8 NumActiveVolumes; /* 08h */ + U8 MaxVolumes; /* 09h */ + U8 NumActivePhysDisks; /* 0Ah */ + U8 MaxPhysDisks; /* 0Bh */ + fCONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */ } fCONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2, IOCPage2_t, MPI_POINTER pIOCPage2_t; -#define MPI_IOCPAGE2_PAGEVERSION (0x00) +#define MPI_IOCPAGE2_PAGEVERSION (0x01) /* IOC Page 2 Capabilities flags */ -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_0_SUPPORT (0x00000001) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_1_SUPPORT (0x00000002) -#define MPI_IOCPAGE2_CAP_FLAGS_LSI_MIRROR_SUPPORT (0x00000004) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000010) - -/* IOC Page 2 Volume RAID Type values */ - -#define MPI_IOCPAGE2_VOL_TYPE_RAID_0 (0x00) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_1 (0x01) -#define MPI_IOCPAGE2_VOL_TYPE_LSI_MIRROR (0x02) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_5 (0x05) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_10 (0x0A) - - 
-/****************************************************************************/ -/* SCSI Port Config Pages */ -/****************************************************************************/ +#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001) +#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002) +#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004) +#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000) +#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000) +#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000) + +/* IOC Page 2 Volume RAID Type values, also used in RAID Volume pages */ + +#define MPI_RAID_VOL_TYPE_IS (0x00) +#define MPI_RAID_VOL_TYPE_IME (0x01) +#define MPI_RAID_VOL_TYPE_IM (0x02) + + +typedef struct _IOC_3_PHYS_DISK +{ + U8 PhysDiskID; /* 00h */ + U8 PhysDiskBus; /* 01h */ + U8 PhysDiskIOC; /* 02h */ + U8 PhysDiskNum; /* 03h */ +} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK, + Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX +#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_IOC_3 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 NumPhysDisks; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ + IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */ +} fCONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3, + IOCPage3_t, MPI_POINTER pIOCPage3_t; + +#define MPI_IOCPAGE3_PAGEVERSION (0x00) + + +typedef struct _IOC_4_SEP +{ + U8 SEPTargetID; /* 00h */ + U8 SEPBus; /* 01h */ + U16 Reserved; /* 02h */ +} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP, + Ioc4Sep_t, MPI_POINTER pIoc4Sep_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_IOC_PAGE_4_SEP_MAX +#define MPI_IOC_PAGE_4_SEP_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_IOC_4 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 ActiveSEP; /* 04h */ + U8 MaxSEP; /* 05h */ + U16 Reserved1; /* 06h */ + IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */ +} fCONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4, + IOCPage4_t, MPI_POINTER pIOCPage4_t; + +#define MPI_IOCPAGE4_PAGEVERSION (0x00) + + +/**************************************************************************** +* SCSI Port Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_SCSI_PORT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Capabilities; /* 04h */ U32 PhysicalInterface; /* 08h */ } fCONFIG_PAGE_SCSI_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_0, @@ -465,7 +616,6 @@ #define MPI_SCSIPORTPAGE0_CAP_IU (0x00000001) #define MPI_SCSIPORTPAGE0_CAP_DT (0x00000002) #define MPI_SCSIPORTPAGE0_CAP_QAS (0x00000004) -#define MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIPORTPAGE0_CAP_WIDE (0x20000000) @@ -479,12 +629,13 @@ typedef struct _CONFIG_PAGE_SCSI_PORT_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Configuration; /* 04h */ + U32 OnBusTimerValue; /* 08h */ } fCONFIG_PAGE_SCSI_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_1, SCSIPortPage1_t, MPI_POINTER pSCSIPortPage1_t; -#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x01) +#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x02) #define MPI_SCSIPORTPAGE1_CFG_PORT_SCSI_ID_MASK (0x000000FF) #define MPI_SCSIPORTPAGE1_CFG_PORT_RESPONSE_ID_MASK (0xFFFF0000) @@ -500,7 +651,7 @@ typedef struct _CONFIG_PAGE_SCSI_PORT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 PortFlags; /* 04h */ U32 PortSettings; /* 08h */ 
MPI_DEVICE_INFO DeviceSettings[16]; /* 0Ch */ @@ -510,7 +661,6 @@ #define MPI_SCSIPORTPAGE2_PAGEVERSION (0x01) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_SCAN_HIGH_TO_LOW (0x00000001) -#define MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE (0x00000002) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET (0x00000004) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_ALTERNATE_CHS (0x00000008) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_TERMINATION_DISABLE (0x00000010) @@ -536,47 +686,48 @@ #define MPI_SCSIPORTPAGE2_DEVICE_BOOT_CHOICE (0x0020) -/****************************************************************************/ -/* SCSI Target Device Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* SCSI Target Device Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_SCSI_DEVICE_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 NegotiatedParameters; /* 04h */ U32 Information; /* 08h */ } fCONFIG_PAGE_SCSI_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_0, SCSIDevicePage0_t, MPI_POINTER pSCSIDevicePage0_t; -#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x01) +#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x02) #define MPI_SCSIDEVPAGE0_NP_IU (0x00000001) #define MPI_SCSIDEVPAGE0_NP_DT (0x00000002) #define MPI_SCSIDEVPAGE0_NP_QAS (0x00000004) -#define MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIDEVPAGE0_NP_WIDE (0x20000000) #define MPI_SCSIDEVPAGE0_NP_AIP (0x80000000) #define MPI_SCSIDEVPAGE0_INFO_PARAMS_NEGOTIATED (0x00000001) +#define MPI_SCSIDEVPAGE0_INFO_SDTR_REJECTED (0x00000002) +#define MPI_SCSIDEVPAGE0_INFO_WDTR_REJECTED (0x00000004) +#define MPI_SCSIDEVPAGE0_INFO_PPR_REJECTED (0x00000008) typedef struct _CONFIG_PAGE_SCSI_DEVICE_1 { - 
fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 RequestedParameters; /* 04h */ U32 Reserved; /* 08h */ U32 Configuration; /* 0Ch */ } fCONFIG_PAGE_SCSI_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_1, SCSIDevicePage1_t, MPI_POINTER pSCSIDevicePage1_t; -#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x02) +#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x03) #define MPI_SCSIDEVPAGE1_RP_IU (0x00000001) #define MPI_SCSIDEVPAGE1_RP_DT (0x00000002) #define MPI_SCSIDEVPAGE1_RP_QAS (0x00000004) -#define MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIDEVPAGE1_RP_WIDE (0x20000000) @@ -585,12 +736,13 @@ #define MPI_SCSIDEVPAGE1_DV_LVD_DRIVE_STRENGTH_MASK (0x00000003) #define MPI_SCSIDEVPAGE1_DV_SE_SLEW_RATE_MASK (0x00000300) -#define MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED (0x00000001) +#define MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED (0x00000002) +#define MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED (0x00000004) typedef struct _CONFIG_PAGE_SCSI_DEVICE_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 DomainValidation; /* 04h */ U32 ParityPipeSelect; /* 08h */ U32 DataPipeSelect; /* 0Ch */ @@ -629,13 +781,13 @@ #define MPI_SCSIDEVPAGE2_DPS_BIT_15_PL_SELECT_MASK (0xC0000000) -/****************************************************************************/ -/* FC Port Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* FC Port Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_FC_PORT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U8 MPIPortNumber; /* 08h */ U8 LinkType; /* 09h */ @@ -715,7 +867,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_1 { - 
fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U64 NoSEEPROMWWNN; /* 08h */ U64 NoSEEPROMWWPN; /* 10h */ @@ -726,8 +878,10 @@ } fCONFIG_PAGE_FC_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_1, FCPortPage1_t, MPI_POINTER pFCPortPage1_t; -#define MPI_FCPORTPAGE1_PAGEVERSION (0x01) +#define MPI_FCPORTPAGE1_PAGEVERSION (0x02) +#define MPI_FCPORTPAGE1_FLAGS_EXT_FCP_STATUS_EN (0x08000000) +#define MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY (0x04000000) #define MPI_FCPORTPAGE1_FLAGS_SORT_BY_DID (0x00000001) #define MPI_FCPORTPAGE1_FLAGS_SORT_BY_WWN (0x00000000) @@ -747,22 +901,21 @@ #define MPI_FCPORTPAGE1_LCONFIG_SPEED_10GIG (0x03) #define MPI_FCPORTPAGE1_LCONFIG_SPEED_AUTO (0x0F) -#define MPI_FCPORTPAGE1_TOPOLGY_MASK (0x0F) -#define MPI_FCPORTPAGE1_TOPOLGY_NLPORT (0x01) -#define MPI_FCPORTPAGE1_TOPOLGY_NPORT (0x02) -#define MPI_FCPORTPAGE1_TOPOLGY_AUTO (0x0F) +#define MPI_FCPORTPAGE1_TOPOLOGY_MASK (0x0F) +#define MPI_FCPORTPAGE1_TOPOLOGY_NLPORT (0x01) +#define MPI_FCPORTPAGE1_TOPOLOGY_NPORT (0x02) +#define MPI_FCPORTPAGE1_TOPOLOGY_AUTO (0x0F) typedef struct _CONFIG_PAGE_FC_PORT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 NumberActive; /* 04h */ - U8 ALPA[126]; /* 05h */ - U8 Reserved; /* 83h */ + U8 ALPA[127]; /* 05h */ } fCONFIG_PAGE_FC_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_2, FCPortPage2_t, MPI_POINTER pFCPortPage2_t; -#define MPI_FCPORTPAGE2_PAGEVERSION (0x00) +#define MPI_FCPORTPAGE2_PAGEVERSION (0x01) typedef struct _WWN_FORMAT @@ -795,10 +948,18 @@ #define MPI_PERSISTENT_FLAGS_BOOT_DEVICE (0x0008) #define MPI_PERSISTENT_FLAGS_BY_DID (0x0080) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_FC_PORT_PAGE_3_ENTRY_MAX +#define MPI_FC_PORT_PAGE_3_ENTRY_MAX (1) +#endif + typedef struct _CONFIG_PAGE_FC_PORT_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - FC_PORT_PERSISTENT Entry[1]; /* 04h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + FC_PORT_PERSISTENT Entry[MPI_FC_PORT_PAGE_3_ENTRY_MAX]; /* 04h */ } fCONFIG_PAGE_FC_PORT_3, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_3, FCPortPage3_t, MPI_POINTER pFCPortPage3_t; @@ -807,7 +968,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_4 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 PortFlags; /* 04h */ U32 PortSettings; /* 08h */ } fCONFIG_PAGE_FC_PORT_4, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_4, @@ -833,13 +994,22 @@ U16 Reserved; /* 02h */ U64 AliasWWNN; /* 04h */ U64 AliasWWPN; /* 0Ch */ -} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO, +} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO, + MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO, FcPortPage5AliasInfo_t, MPI_POINTER pFcPortPage5AliasInfo_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_FC_PORT_PAGE_5_ALIAS_MAX +#define MPI_FC_PORT_PAGE_5_ALIAS_MAX (1) +#endif + typedef struct _CONFIG_PAGE_FC_PORT_5 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo[1]; /* 04h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo[MPI_FC_PORT_PAGE_5_ALIAS_MAX];/* 04h */ } fCONFIG_PAGE_FC_PORT_5, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5, FCPortPage5_t, MPI_POINTER pFCPortPage5_t; @@ -851,7 +1021,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U64 TimeSinceReset; /* 08h */ U64 TxFrames; /* 10h */ @@ -877,7 +1047,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_7 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U8 PortSymbolicName[256]; /* 08h */ } fCONFIG_PAGE_FC_PORT_7, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_7, @@ -888,7 +1058,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_8 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 BitVector[8]; /* 04h */ } fCONFIG_PAGE_FC_PORT_8, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_8, FCPortPage8_t, MPI_POINTER pFCPortPage8_t; @@ -898,7 +1068,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_9 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U64 GlobalWWPN; /* 08h */ U64 GlobalWWNN; /* 10h */ @@ -916,13 +1086,13 @@ #define MPI_FCPORTPAGE9_PAGEVERSION (0x00) -/****************************************************************************/ -/* FC Device Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* FC Device Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_FC_DEVICE_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; 
/* 00h */ U64 WWNN; /* 04h */ U64 WWPN; /* 0Ch */ U32 PortIdentifier; /* 14h */ @@ -947,112 +1117,191 @@ #define MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET (0x02) #define MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR (0x04) -#define MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID) -#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT) -#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK) - - -/****************************************************************************/ -/* RAID Volume Config Pages */ -/****************************************************************************/ - -typedef struct _RAIDVOL2_IM_PHYS_ID -{ - U8 TargetID; /* 00h */ - U8 Bus; /* 01h */ - U8 IocNumber; /* 02h */ - U8 PhysDiskNumber; /* 03h */ - U8 Reserved[8]; /* 04h */ - U8 PhysicalDiskIdentifier[16]; /* 0Ch */ - U8 VendorId[8]; /* 1Ch */ - U8 ProductId[16]; /* 24h */ - U8 ProductRevLevel[4]; /* 34h */ - U32 Reserved1; /* 38h */ - U8 Info[32]; /* 3Ch */ -} RAIDVOL2_IM_PHYS_ID, MPI_POINTER PTR_RAIDVOL2_IM_PHYS_ID, - RaidVol2ImPhysicalID_t, MPI_POINTER pRaidVol2ImPhysicalID_t; - -typedef struct _RAIDVOL2_IM_DISK_INFO -{ - U32 DiskStatus; /* 00h */ - U32 DeviceSettings; /* 04h */ - U16 ErrorCount; /* 08h */ - U16 Reserved; /* 0Ah */ - U8 ErrorCdbByte; /* 0Ch */ - U8 ErrorSenseKey; /* 0Dh */ - U8 ErrorASC; /* 0Eh */ - U8 ErrorASCQ; /* 0Fh */ - U16 SmartCount; /* 10h */ - U8 SmartASC; /* 12h */ - U8 SmartASCQ; /* 13h */ -} RAIDVOL2_IM_DISK_INFO, MPI_POINTER PTR_RAIDVOL2_IM_DISK_INFO, - RaidVol2ImDiskInfo_t, MPI_POINTER pRaidVol2ImDiskInfo_t; +#define 
MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID) +#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT) +#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK) + + +/**************************************************************************** +* RAID Volume Config Pages +****************************************************************************/ + +typedef struct _RAID_VOL0_PHYS_DISK +{ + U16 Reserved; /* 00h */ + U8 PhysDiskMap; /* 02h */ + U8 PhysDiskNum; /* 03h */ +} RAID_VOL0_PHYS_DISK, MPI_POINTER PTR_RAID_VOL0_PHYS_DISK, + RaidVol0PhysDisk_t, MPI_POINTER pRaidVol0PhysDisk_t; + +#define MPI_RAIDVOL0_PHYSDISK_PRIMARY (0x01) +#define MPI_RAIDVOL0_PHYSDISK_SECONDARY (0x02) + +typedef struct _RAID_VOL0_STATUS +{ + U8 Flags; /* 00h */ + U8 State; /* 01h */ + U16 Reserved; /* 02h */ +} RAID_VOL0_STATUS, MPI_POINTER PTR_RAID_VOL0_STATUS, + RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t; + +/* RAID Volume Page 0 VolumeStatus defines */ + +#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01) +#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02) +#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04) + +#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00) +#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01) +#define MPI_RAIDVOL0_STATUS_STATE_FAILED (0x02) + +typedef struct _RAID_VOL0_SETTINGS +{ + U16 Settings; /* 00h */ + U8 HotSparePool; /* 01h */ /* MPI_RAID_HOT_SPARE_POOL_ */ + U8 Reserved; /* 02h */ +} RAID_VOL0_SETTINGS, MPI_POINTER PTR_RAID_VOL0_SETTINGS, + RaidVol0Settings, MPI_POINTER pRaidVol0Settings; + +/* RAID Volume Page 
0 VolumeSettings defines */ + +#define MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE (0x0001) +#define MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART (0x0002) +#define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE (0x0004) +#define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC (0x0008) +#define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0010) +#define MPI_RAIDVOL0_SETTING_USE_DEFAULTS (0x8000) + +/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +#define MPI_RAID_HOT_SPARE_POOL_0 (0x01) +#define MPI_RAID_HOT_SPARE_POOL_1 (0x02) +#define MPI_RAID_HOT_SPARE_POOL_2 (0x04) +#define MPI_RAID_HOT_SPARE_POOL_3 (0x08) +#define MPI_RAID_HOT_SPARE_POOL_4 (0x10) +#define MPI_RAID_HOT_SPARE_POOL_5 (0x20) +#define MPI_RAID_HOT_SPARE_POOL_6 (0x40) +#define MPI_RAID_HOT_SPARE_POOL_7 (0x80) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_RAID_VOL_0 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 VolumeIOC; /* 06h */ + U8 VolumeType; /* 07h */ /* MPI_RAID_VOL_TYPE_ */ + RAID_VOL0_STATUS VolumeStatus; /* 08h */ + RAID_VOL0_SETTINGS VolumeSettings; /* 0Ch */ + U32 MaxLBA; /* 10h */ + U32 Reserved1; /* 14h */ + U32 StripeSize; /* 18h */ + U32 Reserved2; /* 1Ch */ + U32 Reserved3; /* 20h */ + U8 NumPhysDisks; /* 24h */ + U8 Reserved4; /* 25h */ + U16 Reserved5; /* 26h */ + RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */ +} fCONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0, + RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t; + +#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x00) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +typedef struct 
_RAID_PHYS_DISK0_ERROR_DATA +{ + U8 ErrorCdbByte; /* 00h */ + U8 ErrorSenseKey; /* 01h */ + U16 Reserved; /* 02h */ + U16 ErrorCount; /* 04h */ + U8 ErrorASC; /* 06h */ + U8 ErrorASCQ; /* 07h */ + U16 SmartCount; /* 08h */ + U8 SmartASC; /* 0Ah */ + U8 SmartASCQ; /* 0Bh */ +} RAID_PHYS_DISK0_ERROR_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_ERROR_DATA, + RaidPhysDisk0ErrorData_t, MPI_POINTER pRaidPhysDisk0ErrorData_t; + +typedef struct _RAID_PHYS_DISK_INQUIRY_DATA +{ + U8 VendorID[8]; /* 00h */ + U8 ProductID[16]; /* 08h */ + U8 ProductRevLevel[4]; /* 18h */ + U8 Info[32]; /* 1Ch */ +} RAID_PHYS_DISK0_INQUIRY_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_INQUIRY_DATA, + RaidPhysDisk0InquiryData, MPI_POINTER pRaidPhysDisk0InquiryData; + +typedef struct _RAID_PHYS_DISK0_SETTINGS +{ + U8 SepID; /* 00h */ + U8 SepBus; /* 01h */ + U8 HotSparePool; /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */ + U8 PhysDiskSettings; /* 03h */ +} RAID_PHYS_DISK0_SETTINGS, MPI_POINTER PTR_RAID_PHYS_DISK0_SETTINGS, + RaidPhysDiskSettings_t, MPI_POINTER pRaidPhysDiskSettings_t; + +typedef struct _RAID_PHYS_DISK0_STATUS +{ + U8 Flags; /* 00h */ + U8 State; /* 01h */ + U16 Reserved; /* 02h */ +} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS, + RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t; /* RAID Volume 2 IM Physical Disk DiskStatus flags */ -#define MPI_RVP2_PHYS_DISK_PRIMARY (0x00000001) -#define MPI_RVP2_PHYS_DISK_SECONDARY (0x00000002) -#define MPI_RVP2_PHYS_DISK_HOT_SPARE (0x00000004) -#define MPI_RVP2_PHYS_DISK_OUT_OF_SYNC (0x00000008) -#define MPI_RVP2_PHYS_DISK_STATUS_MASK (0x00000F00) -#define MPI_RVP2_PHYS_DISK_STATUS_ONLINE (0x00000000) -#define MPI_RVP2_PHYS_DISK_STATUS_MISSING (0x00000100) -#define MPI_RVP2_PHYS_DISK_STATUS_NOT_COMPATIBLE (0x00000200) -#define MPI_RVP2_PHYS_DISK_STATUS_FAILED (0x00000300) -#define MPI_RVP2_PHYS_DISK_STATUS_INITIALIZING (0x00000400) -#define MPI_RVP2_PHYS_DISK_STATUS_OFFLINE_REQUESTED (0x00000500) -#define 
MPI_RVP2_PHYS_DISK_STATUS_OTHER_OFFLINE (0x00000F00) - - -typedef struct _RAIDVOL2_IM_PHYSICAL_DISK -{ - RAIDVOL2_IM_PHYS_ID Id; /* 00h */ - RAIDVOL2_IM_DISK_INFO Info; /* 5Ch */ -} RAIDVOL2_IM_PHYSICAL_DISK, MPI_POINTER PTR_RAIDVOL2_IM_PHYSICAL_DISK, - RaidVol2ImPhysicalDisk_t, MPI_POINTER pRaidVol2ImPhysicalDisk_t; - -#define MPI_RAIDVOLPAGE2_MAX_DISKS (3) - -typedef struct _CONFIG_PAGE_RAID_VOL_2 -{ - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 VolumeStatus; /* 04h */ - U32 VolumeSettings; /* 08h */ - U32 Reserved; /* 0Ch */ - U64 MaxLba; /* 10h */ - U32 BlockSize; /* 18h */ - U8 Reserved1; /* 1Ch */ - U8 NumPhysicalDisks; /* 1Dh */ - U16 Reserved2; /* 1Eh */ - RAIDVOL2_IM_PHYSICAL_DISK IMPhysicalDisk[MPI_RAIDVOLPAGE2_MAX_DISKS]; -} fCONFIG_PAGE_RAID_VOL_2, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_2, - RaidVolumePage2_t, MPI_POINTER pRaidVolumePage2_t; - -#define MPI_RAIDVOLPAGE2_PAGEVERSION (0x00) - -/* RAID Volume Page 2 VolumeStatus defines */ - -#define MPI_RAIDVOLPAGE2_STATUS_ENABLED (0x00000001) -#define MPI_RAIDVOLPAGE2_STATUS_QUIESCED (0x00000002) -#define MPI_RAIDVOLPAGE2_STATUS_RESYNC_IN_PROGRESS (0x00000004) -#define MPI_RAIDVOLPAGE2_STATUS_DEGRADED (0x00000008) - -/* RAID Volume Page 2 VolumeSettings defines */ - -#define MPI_RAIDVOLPAGE2_SETTING_WRITE_CACHING_ENABLE (0x00000001) -#define MPI_RAIDVOLPAGE2_SETTING_OFFLINE_ON_SMART (0x00000002) -#define MPI_RAIDVOLPAGE2_SETTING_AUTO_CONFIGURE (0x00000004) -#define MPI_RAIDVOLPAGE2_SETTING_USE_DEFAULTS (0x80000000) - - -/****************************************************************************/ -/* LAN Config Pages */ -/****************************************************************************/ +#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01) +#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02) + +#define MPI_PHYSDISK0_STATUS_ONLINE (0x00) +#define MPI_PHYSDISK0_STATUS_MISSING (0x01) +#define MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE (0x02) +#define MPI_PHYSDISK0_STATUS_FAILED (0x03) +#define 
MPI_PHYSDISK0_STATUS_INITIALIZING (0x04) +#define MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED (0x05) +#define MPI_PHYSDISK0_STATUS_FAILED_REQUESTED (0x06) +#define MPI_PHYSDISK0_STATUS_OTHER_OFFLINE (0xFF) + +typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 PhysDiskID; /* 04h */ + U8 PhysDiskBus; /* 05h */ + U8 PhysDiskIOC; /* 06h */ + U8 PhysDiskNum; /* 07h */ + RAID_PHYS_DISK0_SETTINGS PhysDiskSettings; /* 08h */ + U32 Reserved1; /* 0Ch */ + U32 Reserved2; /* 10h */ + U32 Reserved3; /* 14h */ + U8 DiskIdentifier[16]; /* 18h */ + RAID_PHYS_DISK0_INQUIRY_DATA InquiryData; /* 28h */ + RAID_PHYS_DISK0_STATUS PhysDiskStatus; /* 64h */ + U32 MaxLBA; /* 68h */ + RAID_PHYS_DISK0_ERROR_DATA ErrorData; /* 6Ch */ +} fCONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0, + RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t; + +#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x00) + + +/**************************************************************************** +* LAN Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_LAN_0 { @@ -1083,8 +1332,8 @@ U32 MaxWireSpeedHigh; /* 1Ch */ U32 BucketsRemaining; /* 20h */ U32 MaxReplySize; /* 24h */ - U32 NegWireSpeedHigh; /* 28h */ - U32 NegWireSpeedLow; /* 2Ch */ + U32 NegWireSpeedLow; /* 28h */ + U32 NegWireSpeedHigh; /* 2Ch */ } fCONFIG_PAGE_LAN_1, MPI_POINTER PTR_CONFIG_PAGE_LAN_1, LANPage1_t, MPI_POINTER pLANPage1_t; diff -Nru a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h --- a/drivers/message/fusion/lsi/mpi_fc.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi_fc.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI Fibre Channel messages and structures * Creation Date: June 12, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.02 * * Version History * --------------- @@ -32,6 +32,9 @@ * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define. 
* Added structure offset comments. * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Change name of reserved field in + * MSG_LINK_SERVICE_RSP_REPLY. * -------------------------------------------------------------------------- */ @@ -172,7 +175,7 @@ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U16 Reserved1; /* 04h */ - U8 Reserved2; /* 06h */ + U8 Reserved_0100_InitiatorIndex; /* 06h */ /* obsolete InitiatorIndex */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved3; /* 0Ch */ diff -Nru a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h --- a/drivers/message/fusion/lsi/mpi_init.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi_init.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI initiator mode messages and structures * Creation Date: June 8, 2000 * - * MPI Version: 01.01.05 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -22,6 +22,13 @@ * 02-20-01 01.01.03 Started using MPI_POINTER. * 03-27-01 01.01.04 Added structure offset comments. * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 08-29-01 01.02.02 Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET. + * Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for + * MSG_SCSI_IO_REPLY. + * 09-28-01 01.02.03 Added structures and defines for SCSI Enclosure + * Processor messages. + * 10-04-01 01.02.04 Added defines for SEP request Action field. 
* -------------------------------------------------------------------------- */ @@ -151,6 +158,7 @@ #define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04) #define MPI_SCSI_STATE_TERMINATED (0x08) #define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20) /* SCSIIO Reply ResponseInfo values */ /* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */ @@ -191,6 +199,7 @@ #define MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) #define MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) #define MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS (0x04) +#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) /* MsgFlags bits */ #define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00) @@ -215,5 +224,92 @@ U32 TerminationCount; /* 14h */ } MSG_SCSI_TASK_MGMT_REPLY, MPI_POINTER PTR_MSG_SCSI_TASK_MGMT_REPLY, SCSITaskMgmtReply_t, MPI_POINTER pSCSITaskMgmtReply_t; + + +/****************************************************************************/ +/* SCSI Enclosure Processor messages */ +/****************************************************************************/ + +typedef struct _MSG_SEP_REQUEST +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 Action; /* 04h */ + U8 Reserved1; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 SlotStatus; /* 0Ch */ +} MSG_SEP_REQUEST, MPI_POINTER PTR_MSG_SEP_REQUEST, + SEPRequest_t, MPI_POINTER pSEPRequest_t; + +/* Action defines */ +#define MPI_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI_SEP_REQ_ACTION_READ_STATUS (0x01) + +/* SlotStatus bits for MSG_SEP_REQUEST */ +#define MPI_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI_SEP_REQ_SLOTSTATUS_PARITY_CHECK (0x00000020) 
+#define MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000) +#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000) +#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) +#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000) +#define MPI_SEP_REQ_SLOTSTATUS_SWAP_RESET (0x80000000) + + +typedef struct _MSG_SEP_REPLY +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 Action; /* 04h */ + U8 Reserved1; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved3; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 SlotStatus; /* 14h */ +} MSG_SEP_REPLY, MPI_POINTER PTR_MSG_SEP_REPLY, + SEPReply_t, MPI_POINTER pSEPReply_t; + +/* SlotStatus bits for MSG_SEP_REPLY */ +#define MPI_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI_SEP_REPLY_SLOTSTATUS_PARITY_CHECK (0x00000020) +#define MPI_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000) +#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) 
+#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000) +#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000) +#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000) +#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000) +#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) +#define MPI_SEP_REPLY_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x10000000) +#define MPI_SEP_REPLY_SLOTSTATUS_FAULT_SENSED (0x40000000) +#define MPI_SEP_REPLY_SLOTSTATUS_SWAPPED (0x80000000) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h --- a/drivers/message/fusion/lsi/mpi_ioc.h Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/lsi/mpi_ioc.h Fri Apr 26 00:01:27 2002 @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: August 11, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -38,6 +38,19 @@ * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER. * Added structure offset comments. * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE. + * 08-08-01 01.02.01 Original release for v1.2 work. + * New format for FWVersion and ProductId in + * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. + * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and + * related structure and defines. + * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. + * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. + * Replaced a reserved field in MSG_IOC_FACTS_REPLY with + * IOCExceptions and changed DataImageSize to reserved. + * Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and + * MPI_FW_UPLOAD_ITYPE_NVDATA. + * 09-28-01 01.02.03 Modified Event Data for Integrated RAID. + * 11-01-01 01.02.04 Added defines for MPI_EXT_IMAGE_HEADER ImageType field.
* -------------------------------------------------------------------------- */ @@ -73,6 +86,17 @@ } MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT, IOCInit_t, MPI_POINTER pIOCInit_t; +/* WhoInit values */ +#define MPI_WHOINIT_NO_ONE (0x00) +#define MPI_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI_WHOINIT_ROM_BIOS (0x02) +#define MPI_WHOINIT_PCI_PEER (0x03) +#define MPI_WHOINIT_HOST_DRIVER (0x04) +#define MPI_WHOINIT_MANUFACTURER (0x05) + +/* Flags values */ +#define MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE (0x01) + typedef struct _MSG_IOC_INIT_REPLY { U8 WhoInit; /* 00h */ @@ -90,14 +114,6 @@ } MSG_IOC_INIT_REPLY, MPI_POINTER PTR_MSG_IOC_INIT_REPLY, IOCInitReply_t, MPI_POINTER pIOCInitReply_t; -/* WhoInit values */ - -#define MPI_WHOINIT_NO_ONE (0x00) -#define MPI_WHOINIT_SYSTEM_BIOS (0x01) -#define MPI_WHOINIT_ROM_BIOS (0x02) -#define MPI_WHOINIT_PCI_PEER (0x03) -#define MPI_WHOINIT_HOST_DRIVER (0x04) -#define MPI_WHOINIT_MANUFACTURER (0x05) /****************************************************************************/ @@ -115,8 +131,21 @@ } MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS, IOCFacts_t, MPI_POINTER pIOCFacts_t; -/* IOC Facts Reply */ +typedef struct _MPI_FW_VERSION_STRUCT +{ + U8 Dev; /* 00h */ + U8 Unit; /* 01h */ + U8 Minor; /* 02h */ + U8 Major; /* 03h */ +} MPI_FW_VERSION_STRUCT; + +typedef union _MPI_FW_VERSION +{ + MPI_FW_VERSION_STRUCT Struct; + U32 Word; +} MPI_FW_VERSION; +/* IOC Facts Reply */ typedef struct _MSG_IOC_FACTS_REPLY { U16 MsgVersion; /* 00h */ @@ -126,7 +155,7 @@ U8 IOCNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved2; /* 0Ch */ + U16 IOCExceptions; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U8 MaxChainDepth; /* 14h */ @@ -135,7 +164,7 @@ U8 Flags; /* 17h */ U16 ReplyQueueDepth; /* 18h */ U16 RequestFrameSize; /* 1Ah */ - U16 FWVersion; /* 1Ch */ + U16 Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */ U16 ProductID; /* 1Eh */ U32 CurrentHostMfaHighAddr; /* 20h */ U16 
GlobalCredits; /* 24h */ @@ -146,18 +175,20 @@ U8 MaxDevices; /* 2Eh */ U8 MaxBuses; /* 2Fh */ U32 FWImageSize; /* 30h */ - U32 DataImageSize; /* 34h */ + U32 Reserved4; /* 34h */ + MPI_FW_VERSION FWVersion; /* 38h */ } MSG_IOC_FACTS_REPLY, MPI_POINTER PTR_MSG_IOC_FACTS_REPLY, IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t; -#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) -#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) + +#define MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) -#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01) -#define MPI_IOCFACTS_FLAGS_DATA_IMAGE_UPLOAD (0x02) +#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01) -#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00) -#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01) +#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00) +#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01) @@ -326,7 +357,6 @@ } MSG_EVENT_ACK_REPLY, MPI_POINTER PTR_MSG_EVENT_ACK_REPLY, EventAckReply_t, MPI_POINTER pEventAckReply_t; - /* Switch */ #define MPI_EVENT_NOTIFICATION_SWITCH_OFF (0x00) @@ -345,7 +375,9 @@ #define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008) #define MPI_EVENT_LOGOUT (0x00000009) #define MPI_EVENT_EVENT_CHANGE (0x0000000A) -#define MPI_EVENT_RAID_STATUS_CHANGE (0x0000000B) +#define MPI_EVENT_INTEGRATED_RAID (0x0000000B) +#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C) +#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D) /* AckRequired field values */ @@ -372,6 +404,27 @@ } EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI, EventDataScsi_t, MPI_POINTER pEventDataScsi_t; +/* SCSI Device Status Change Event data */ + +typedef struct _EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 ReasonCode; /* 02h */ + U8 LUN; /* 03h */ + U8 ASC; /* 04h */ + U8 ASCQ; /* 05h */ + U16 Reserved; /* 06h */ +} EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE, + MPI_POINTER 
PTR_EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE, + MpiEventDataScsiDeviceStatusChange_t, + MPI_POINTER pMpiEventDataScsiDeviceStatusChange_t; + +/* MPI SCSI Device Status Change Event data ReasonCode values */ +#define MPI_EVENT_SCSI_DEV_STAT_RC_ADDED (0x03) +#define MPI_EVENT_SCSI_DEV_STAT_RC_NOT_RESPONDING (0x04) +#define MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA (0x05) + /* MPI Link Status Change Event data */ typedef struct _EVENT_DATA_LINK_STATUS @@ -417,29 +470,34 @@ } EVENT_DATA_LOGOUT, MPI_POINTER PTR_EVENT_DATA_LOGOUT, EventDataLogout_t, MPI_POINTER pEventDataLogout_t; -/* MPI RAID Status Change Event data */ +/* MPI Integrated RAID Event data */ -typedef struct _EVENT_DATA_RAID_STATUS_CHANGE +typedef struct _EVENT_DATA_RAID { - U8 VolumeTargetID; /* 00h */ + U8 VolumeID; /* 00h */ U8 VolumeBus; /* 01h */ U8 ReasonCode; /* 02h */ U8 PhysDiskNum; /* 03h */ U8 ASC; /* 04h */ U8 ASCQ; /* 05h */ U16 Reserved; /* 06h */ -} EVENT_DATA_RAID_STATUS_CHANGE, MPI_POINTER PTR_EVENT_DATA_RAID_STATUS_CHANGE, - MpiEventDataRaidStatusChange_t, MPI_POINTER pMpiEventDataRaidStatusChange_t; - - -/* MPI RAID Status Change Event data ReasonCode values */ - -#define MPI_EVENT_RAID_DATA_RC_VOLUME_OPTIMAL (0x00) -#define MPI_EVENT_RAID_DATA_RC_VOLUME_DEGRADED (0x01) -#define MPI_EVENT_RAID_DATA_RC_STARTED_RESYNC (0x02) -#define MPI_EVENT_RAID_DATA_RC_DISK_ADDED (0x03) -#define MPI_EVENT_RAID_DATA_RC_DISK_NOT_RESPONDING (0x04) -#define MPI_EVENT_RAID_DATA_RC_SMART_DATA (0x05) + U32 SettingsStatus; /* 08h */ +} EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID, + MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t; + +/* MPI Integrated RAID Event data ReasonCode values */ +#define MPI_EVENT_RAID_RC_VOLUME_CREATED (0x00) +#define MPI_EVENT_RAID_RC_VOLUME_DELETED (0x01) +#define MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED (0x02) +#define MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED (0x03) +#define MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED (0x04) +#define MPI_EVENT_RAID_RC_PHYSDISK_CREATED (0x05) 
+#define MPI_EVENT_RAID_RC_PHYSDISK_DELETED (0x06) +#define MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED (0x07) +#define MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED (0x08) +#define MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED (0x09) +#define MPI_EVENT_RAID_RC_SMART_DATA (0x0A) +#define MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED (0x0B) /***************************************************************************** @@ -468,6 +526,7 @@ #define MPI_FW_DOWNLOAD_ITYPE_RESERVED (0x00) #define MPI_FW_DOWNLOAD_ITYPE_FW (0x01) #define MPI_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI_FW_DOWNLOAD_ITYPE_NVDATA (0x03) typedef struct _FWDownloadTCSGE @@ -476,7 +535,7 @@ U8 ContextSize; /* 01h */ U8 DetailsLength; /* 02h */ U8 Flags; /* 03h */ - U32 Reserved1; /* 04h */ + U32 Reserved_0100_Checksum; /* 04h */ /* obsolete Checksum */ U32 ImageOffset; /* 08h */ U32 ImageSize; /* 0Ch */ } FW_DOWNLOAD_TCSGE, MPI_POINTER PTR_FW_DOWNLOAD_TCSGE, @@ -519,7 +578,7 @@ #define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00) #define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01) #define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) -#define MPI_FW_UPLOAD_ITYPE_DATA_IOC_MEM (0x03) +#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03) typedef struct _FWUploadTCSGE { @@ -563,11 +622,10 @@ U32 Checksum; /* 1Ch */ U16 VendorId; /* 20h */ U16 ProductId; /* 22h */ - U16 FwVersion; /* 24h */ - U16 Reserved1; /* 26h */ + MPI_FW_VERSION FWVersion; /* 24h */ U32 SeqCodeVersion; /* 28h */ U32 ImageSize; /* 2Ch */ - U32 Reserved2; /* 30h */ + U32 NextImageHeaderOffset; /* 30h */ U32 LoadStartAddress; /* 34h */ U32 IopResetVectorValue; /* 38h */ U32 IopResetRegAddr; /* 3Ch */ @@ -581,30 +639,49 @@ #define MPI_FW_HEADER_WHAT_SIGNATURE (0x29232840) /* defines for using the ProductId field */ -#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000) -#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000) -#define MPI_FW_HEADER_PID_TYPE_FC (0x1000) - -#define MPI_FW_HEADER_PID_FW_VENDOR_MASK (0x0F00) -#define MPI_FW_HEADER_PID_FW_VENDOR_LSI (0x0000) - -#define 
MPI_FW_HEADER_PID_FAMILY_MASK (0x000F) -#define MPI_FW_HEADER_PID_FAMILY_1030_SCSI (0x0000) -#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000) -#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001) -#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002) - - -typedef struct _MPI_DATA_HEADER -{ - U32 Signature; /* 00h */ - U16 FunctionNumber; /* 04h */ - U16 Length; /* 06h */ - U32 Checksum; /* 08h */ - U32 LoadStartAddress; /* 0Ch */ -} MPI_DATA_HEADER, MPI_POINTER PTR_MPI_DATA_HEADER, - MpiDataHeader_t, MPI_POINTER pMpiDataHeader_t; +#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000) +#define MPI_FW_HEADER_PID_TYPE_FC (0x1000) + +#define MPI_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI (0x0100) +#define MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI_FW_HEADER_PID_PROD_TARGET_SCSI (0x0300) +#define MPI_FW_HEADER_PID_PROD_IM_SCSI (0x0400) +#define MPI_FW_HEADER_PID_PROD_IS_SCSI (0x0500) +#define MPI_FW_HEADER_PID_PROD_CTX_SCSI (0x0600) + +#define MPI_FW_HEADER_PID_FAMILY_MASK (0x00FF) +#define MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI (0x0001) +#define MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI (0x0002) +#define MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI (0x0003) +#define MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI (0x0004) +#define MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI (0x0005) +#define MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI (0x0006) +#define MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI (0x0007) +#define MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI (0x0008) +#define MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI (0x0009) +#define MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI (0x000A) +#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000) +#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001) +#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002) -#define MPI_DATA_HEADER_SIGNATURE (0x43504147) +typedef struct _MPI_EXT_IMAGE_HEADER +{ + U8 ImageType; /* 00h */ + U8 Reserved; /* 01h */ + U16 Reserved1; /* 02h */ + U32 Checksum; /* 04h */ + U32 ImageSize; /* 08h 
*/ + U32 NextImageHeaderOffset; /* 0Ch */ + U32 LoadStartAddress; /* 10h */ + U32 Reserved2; /* 14h */ +} MPI_EXT_IMAGE_HEADER, MPI_POINTER PTR_MPI_EXT_IMAGE_HEADER, + MpiExtImageHeader_t, MPI_POINTER pMpiExtImageHeader_t; + +/* defines for the ImageType field */ +#define MPI_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI_EXT_IMAGE_TYPE_FW (0x01) +#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h --- a/drivers/message/fusion/lsi/mpi_lan.h Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/lsi/mpi_lan.h Fri Apr 26 00:01:27 2002 @@ -6,7 +6,7 @@ * Title: MPI LAN messages and structures * Creation Date: June 30, 2000 * - * MPI Version: 01.01.03 + * MPI Version: 01.02.01 * * Version History * --------------- @@ -27,6 +27,7 @@ * 11-02-00 01.01.01 Original release for post 1.0 work * 02-20-01 01.01.02 Started using MPI_POINTER. * 03-27-01 01.01.03 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. * -------------------------------------------------------------------------- */ diff -Nru a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/message/fusion/lsi/mpi_raid.h Fri Apr 26 00:01:27 2002 @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2001 LSI Logic Corporation. + * + * + * Name: MPI_RAID.H + * Title: MPI RAID message and structures + * Creation Date: February 27, 2001 + * + * MPI Version: 01.02.04 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 02-27-01 01.01.01 Original release for this file. + * 03-27-01 01.01.02 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Major rework for MPI v1.2 Integrated RAID changes. + * 10-04-01 01.02.03 Added ActionData defines for + * MPI_RAID_ACTION_DELETE_VOLUME action. 
+ * 11-01-01 01.02.04 Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI_RAID_H +#define MPI_RAID_H + + +/****************************************************************************** +* +* R A I D M e s s a g e s +* +*******************************************************************************/ + + +/****************************************************************************/ +/* RAID Volume Request */ +/****************************************************************************/ + +typedef struct _MSG_RAID_ACTION +{ + U8 Action; /* 00h */ + U8 Reserved1; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 PhysDiskNum; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved2; /* 0Ch */ + U32 ActionDataWord; /* 10h */ + SGE_SIMPLE_UNION ActionDataSGE; /* 14h */ +} MSG_RAID_ACTION_REQUEST, MPI_POINTER PTR_MSG_RAID_ACTION_REQUEST, + MpiRaidActionRequest_t , MPI_POINTER pMpiRaidActionRequest_t; + + +/* RAID Action request Action values */ + +#define MPI_RAID_ACTION_STATUS (0x00) +#define MPI_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI_RAID_ACTION_DISABLE_VOLUME (0x04) +#define MPI_RAID_ACTION_ENABLE_VOLUME (0x05) +#define MPI_RAID_ACTION_QUIESCE_PHYS_IO (0x06) +#define MPI_RAID_ACTION_ENABLE_PHYS_IO (0x07) +#define MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS (0x08) +#define MPI_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI_RAID_ACTION_CHANGE_PHYSDISK_SETTINGS (0x0C) +#define MPI_RAID_ACTION_CREATE_PHYSDISK (0x0D) +#define MPI_RAID_ACTION_DELETE_PHYSDISK (0x0E) +#define MPI_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI_RAID_ACTION_REPLACE_PHYSDISK (0x10) + +/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */ 
+#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001) + +/* ActionDataWord defines for use with MPI_RAID_ACTION_DELETE_VOLUME action */ +#define MPI_RAID_ACTION_ADATA_KEEP_PHYS_DISKS (0x00000000) +#define MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS (0x00000001) + + +/* RAID Action reply message */ + +typedef struct _MSG_RAID_ACTION_REPLY +{ + U8 Action; /* 00h */ + U8 Reserved; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 PhysDiskNum; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 ActionStatus; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 VolumeStatus; /* 14h */ + U32 ActionData; /* 18h */ +} MSG_RAID_ACTION_REPLY, MPI_POINTER PTR_MSG_RAID_ACTION_REPLY, + MpiRaidActionReply_t, MPI_POINTER pMpiRaidActionReply_t; + + +/* RAID Volume reply ActionStatus values */ + +#define MPI_RAID_ACTION_ASTATUS_SUCCESS (0x0000) +#define MPI_RAID_ACTION_ASTATUS_INVALID_ACTION (0x0001) +#define MPI_RAID_ACTION_ASTATUS_FAILURE (0x0002) +#define MPI_RAID_ACTION_ASTATUS_IN_PROGRESS (0x0003) + + +/* RAID Volume reply RAID Volume Indicator structure */ + +typedef struct _MPI_RAID_VOL_INDICATOR +{ + U64 TotalBlocks; /* 00h */ + U64 BlocksRemaining; /* 08h */ +} MPI_RAID_VOL_INDICATOR, MPI_POINTER PTR_MPI_RAID_VOL_INDICATOR, + MpiRaidVolIndicator_t, MPI_POINTER pMpiRaidVolIndicator_t; + + +/****************************************************************************/ +/* SCSI IO RAID Passthrough Request */ +/****************************************************************************/ + +typedef struct _MSG_SCSI_IO_RAID_PT_REQUEST +{ + U8 PhysDiskNum; /* 00h */ + U8 Reserved1; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 CDBLength; /* 04h */ + U8 SenseBufferLength; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 LUN[8]; /* 0Ch */ + U32 Control; /* 14h */ + U8 CDB[16]; /* 18h */ + U32 DataLength; /* 28h */ + U32 
SenseBufferLowAddr; /* 2Ch */ + SGE_IO_UNION SGL; /* 30h */ +} MSG_SCSI_IO_RAID_PT_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REQUEST, + SCSIIORaidPassthroughRequest_t, MPI_POINTER pSCSIIORaidPassthroughRequest_t; + + +/* SCSI IO RAID Passthrough reply structure */ + +typedef struct _MSG_SCSI_IO_RAID_PT_REPLY +{ + U8 PhysDiskNum; /* 00h */ + U8 Reserved1; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 CDBLength; /* 04h */ + U8 SenseBufferLength; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 SCSIStatus; /* 0Ch */ + U8 SCSIState; /* 0Dh */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 TransferCount; /* 14h */ + U32 SenseCount; /* 18h */ + U32 ResponseInfo; /* 1Ch */ +} MSG_SCSI_IO_RAID_PT_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REPLY, + SCSIIORaidPassthroughReply_t, MPI_POINTER pSCSIIORaidPassthroughReply_t; + + +#endif + + + diff -Nru a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h --- a/drivers/message/fusion/lsi/mpi_targ.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi_targ.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI Target mode messages and structures * Creation Date: June 22, 2000 * - * MPI Version: 01.01.04 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -26,6 +26,14 @@ * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and * MPI_TARGET_FCP_CMD_BUFFER. * 03-27-01 01.01.04 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU. + * Added PriorityReason field to some replies and + * defined more PriorityReason codes. + * Added some defines to support previous version + * of MPI. + * 10-04-01 01.02.03 Added PriorityReason to MSG_TARGET_ERROR_REPLY. + * 11-01-01 01.02.04 Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY.
* -------------------------------------------------------------------------- */ @@ -78,6 +86,7 @@ #define CMD_BUFFER_POST_FLAGS_64_BIT_ADDR (0x80) #define CMD_BUFFER_POST_IO_INDEX_MASK (0x00003FFF) +#define CMD_BUFFER_POST_IO_INDEX_MASK_0100 (0x000003FF) /* obsolete */ typedef struct _MSG_TARGET_CMD_BUFFER_POST_REPLY @@ -97,7 +106,7 @@ } MSG_TARGET_CMD_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REPLY, TargetCmdBufferPostReply_t, MPI_POINTER pTargetCmdBufferPostReply_t; - +/* the following structure is obsolete as of MPI v1.2 */ typedef struct _MSG_PRIORITY_CMD_RECEIVED_REPLY { U16 Reserved; /* 00h */ @@ -117,6 +126,13 @@ #define PRIORITY_REASON_NO_DISCONNECT (0x00) #define PRIORITY_REASON_SCSI_TASK_MANAGEMENT (0x01) +#define PRIORITY_REASON_CMD_PARITY_ERR (0x02) +#define PRIORITY_REASON_MSG_OUT_PARITY_ERR (0x03) +#define PRIORITY_REASON_LQ_CRC_ERR (0x04) +#define PRIORITY_REASON_CMD_CRC_ERR (0x05) +#define PRIORITY_REASON_PROTOCOL_ERR (0x06) +#define PRIORITY_REASON_DATA_OUT_PARITY_ERR (0x07) +#define PRIORITY_REASON_DATA_OUT_CRC_ERR (0x08) #define PRIORITY_REASON_UNKNOWN (0xFF) @@ -129,7 +145,8 @@ U8 Reserved2; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved3; /* 0Ch */ + U8 PriorityReason; /* 0Ch */ + U8 Reserved3; /* 0Dh */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 ReplyWord; /* 14h */ @@ -204,7 +221,8 @@ U8 Reserved2; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved3; /* 0Ch */ + U8 PriorityReason; /* 0Ch */ + U8 Reserved3; /* 0Dh */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 ReplyWord; /* 14h */ @@ -234,8 +252,34 @@ TargetStatusSendRequest_t, MPI_POINTER pTargetStatusSendRequest_t; #define TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS (0x01) +#define TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY (0x04) #define TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER (0x80) +typedef struct _MPI_TARGET_FCP_RSP_BUFFER +{ + U8 Reserved0[8]; /* 00h */ + U8 FcpStatus; /* 08h */ + U8 FcpFlags; /* 
09h */ + U8 Reserved1[2]; /* 0Ah */ + U32 FcpResid; /* 0Ch */ + U32 FcpSenseLength; /* 10h */ + U32 FcpResponseLength; /* 14h */ + U8 FcpResponseData[8]; /* 18h */ + U8 FcpSenseData[32]; /* Pad to 64 bytes */ /* 20h */ +} MPI_TARGET_FCP_RSP_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_RSP_BUFFER, + MpiTargetFcpRspBuffer, MPI_POINTER pMpiTargetFcpRspBuffer; + +typedef struct _MPI_TARGET_SCSI_SPI_STATUS_IU +{ + U8 Reserved0; /* 00h */ + U8 Reserved1; /* 01h */ + U8 Valid; /* 02h */ + U8 Status; /* 03h */ + U32 SenseDataListLength; /* 04h */ + U32 PktFailuresListLength; /* 08h */ + U8 SenseData[52]; /* Pad the IU to 64 bytes */ /* 0Ch */ +} MPI_TARGET_SCSI_SPI_STATUS_IU, MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_STATUS_IU, + TargetScsiSpiStatusIU_t, MPI_POINTER pTargetScsiSpiStatusIU_t; /****************************************************************************/ /* Target Mode Abort Request */ @@ -323,6 +367,41 @@ #define SET_PORT(t, p) ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) | \ (((p) << TARGET_MODE_REPLY_PORT_SHIFT) & \ TARGET_MODE_REPLY_PORT_MASK)) + +/* the following obsolete values are for MPI v1.0 support */ +#define TARGET_MODE_REPLY_0100_MASK_HOST_INDEX (0x000003FF) +#define TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX (0) +#define TARGET_MODE_REPLY_0100_MASK_IOC_INDEX (0x001FF800) +#define TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX (11) +#define TARGET_MODE_REPLY_0100_PORT_MASK (0x00400000) +#define TARGET_MODE_REPLY_0100_PORT_SHIFT (22) +#define TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX (0x1F800000) +#define TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX (23) + +#define GET_HOST_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) \ + >> TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) + +#define SET_HOST_INDEX_0100(t, hi) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) | \ + (((hi) << TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_HOST_INDEX)) + +#define GET_IOC_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) \ + >> 
TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) + +#define SET_IOC_INDEX_0100(t, ii) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) | \ + (((ii) << TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_IOC_INDEX)) + +#define GET_INITIATOR_INDEX_0100(x) \ + (((x) & TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) \ + >> TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) + +#define SET_INITIATOR_INDEX_0100(t, ii) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) | \ + (((ii) << TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX)) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h --- a/drivers/message/fusion/lsi/mpi_type.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/lsi/mpi_type.h Fri Apr 26 00:01:26 2002 @@ -6,7 +6,7 @@ * Title: MPI Basic type definitions * Creation Date: June 6, 2000 * - * MPI Version: 01.01.02 + * MPI Version: 01.02.01 * * Version History * --------------- @@ -17,6 +17,7 @@ * 06-06-00 01.00.01 Update version number for 1.0 release. * 11-02-00 01.01.01 Original release for post 1.0 work * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER. + * 08-08-01 01.02.01 Original release for v1.2 work. * -------------------------------------------------------------------------- */ diff -Nru a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c --- a/drivers/message/fusion/mptbase.c Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/mptbase.c Fri Apr 26 00:01:27 2002 @@ -20,6 +20,12 @@ * And to Roger Hickerson (LSI Logic) for tirelessly supporting * this driver project. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. 
+ * * All manner of help from Stephen Shirron (LSI Logic): * low-level FC analysis, debug + various fixes in FCxx firmware, * initial port to alpha platform, various driver code optimizations, @@ -38,11 +44,12 @@ * for gobs of hard work fixing and optimizing LAN code. * THANK YOU! * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptbase.c,v 1.53.4.3 2001/09/18 03:54:54 sralston Exp $ + * $Id: mptbase.c,v 1.110 2002/02/27 18:44:20 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -93,11 +100,14 @@ #include #include #include -#include +#include /* needed for in_interrupt() proto */ #include #ifdef CONFIG_MTRR #include #endif +#ifdef __sparc__ +#include /* needed for __irq_itoa() proto */ +#endif #include "mptbase.h" @@ -110,27 +120,33 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); - /* * cmd line parameters */ MODULE_PARM(PortIo, "0-1i"); MODULE_PARM_DESC(PortIo, "[0]=Use mmap, 1=Use port io"); -MODULE_PARM(HardReset, "0-1i"); -MODULE_PARM_DESC(HardReset, "0=Disable HardReset, [1]=Enable HardReset"); static int PortIo = 0; -static int HardReset = 1; + +#ifdef MFCNT +static int mfcounter = 0; +#define PRINT_MF_COUNT 20000 +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Public data... */ -int mpt_lan_index = 0; -int mpt_stm_index = 0; +int mpt_lan_index = -1; +int mpt_stm_index = -1; + +struct proc_dir_entry *mpt_proc_root_dir; + +DmpServices_t *DmpService; + +void *mpt_v_ASCQ_TablePtr; +const char **mpt_ScsiOpcodesPtr; +int mpt_ASCQ_TableSz; -void *mpt_v_ASCQ_TablePtr = NULL; -const char **mpt_ScsiOpcodesPtr = NULL; -int mpt_ASCQ_TableSz = 0; #define WHOINIT_UNKNOWN 0xAA @@ -139,12 +155,12 @@ * Private data... 
*/ /* Adapter lookup table */ -static MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS] = {0}; + MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS]; static MPT_ADAPTER_TRACKER MptAdapters; /* Callback lookup table */ static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS]; /* Protocol driver class lookup table */ -static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS]; +static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS]; /* Event handler lookup table */ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; /* Reset handler lookup table */ @@ -152,6 +168,10 @@ static int FusionInitCalled = 0; static int mpt_base_index = -1; +static int last_drv_idx = -1; +static int isense_idx = -1; + +static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -160,49 +180,84 @@ static void mpt_interrupt(int irq, void *bus_id, struct pt_regs *r); static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); -static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason); -static int mpt_adapter_install(struct pci_dev *pdev); -static void mpt_detect_929_bound_ports(MPT_ADAPTER *this, struct pci_dev *pdev); +static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag); +static int mpt_adapter_install(struct pci_dev *pdev); +static void mpt_detect_bound_ports(MPT_ADAPTER *this, struct pci_dev *pdev); static void mpt_adapter_disable(MPT_ADAPTER *ioc, int freeup); static void mpt_adapter_dispose(MPT_ADAPTER *ioc); static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc); -static int MakeIocReady(MPT_ADAPTER *ioc, int force); -static u32 GetIocState(MPT_ADAPTER *ioc, int cooked); -static int GetIocFacts(MPT_ADAPTER *ioc); -static int GetPortFacts(MPT_ADAPTER *ioc, int portnum); -static int SendIocInit(MPT_ADAPTER *ioc); -static int SendPortEnable(MPT_ADAPTER *ioc, int portnum); -static int mpt_fc9x9_reset(MPT_ADAPTER *ioc, int ignore); -static int KickStart(MPT_ADAPTER *ioc, int ignore); 
-static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type); +static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag); +//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); +static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason); +static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag); +static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag); +static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag); +static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag); +static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag); +static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag); +static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag); +static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag); static int PrimeIocFifos(MPT_ADAPTER *ioc); -static int HandShakeReqAndReply(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait); -static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong); -static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong); -static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong); +static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag); +static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag); +static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag); static int GetLanConfigPages(MPT_ADAPTER *ioc); +static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum); +static int GetIoUnitPage2(MPT_ADAPTER *ioc); +static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); +static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); +static int mpt_findImVolumes(MPT_ADAPTER *ioc); +static void mpt_timer_expired(unsigned long data); static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); -static int procmpt_create(void); #ifdef CONFIG_PROC_FS +static int 
procmpt_create(void); static int procmpt_destroy(void); +static int procmpt_summary_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +static int procmpt_version_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +static int procmpt_iocinfo_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); #endif -static int procmpt_read_summary(char *page, char **start, off_t off, int count, int *eof, void *data); -static int procmpt_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data); -/*static int procmpt_info(char *buf, char **start, off_t offset, int len);*/ +static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); +//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info); -static struct proc_dir_entry *procmpt_root_dir = NULL; - int fusion_init(void); static void fusion_exit(void); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * more Private data... + */ +#ifdef CONFIG_PROC_FS +struct _mpt_proc_list { + const char *name; + int (*f)(char *, char **, off_t, int, int *, void *); +} mpt_proc_list[] = { + { "summary", procmpt_summary_read}, + { "version", procmpt_version_read}, +}; +#define MPT_PROC_ENTRIES (sizeof(mpt_proc_list)/sizeof(mpt_proc_list[0])) + +struct _mpt_ioc_proc_list { + const char *name; + int (*f)(char *, char **, off_t, int, int *, void *); +} mpt_ioc_proc_list[] = { + { "info", procmpt_iocinfo_read}, + { "summary", procmpt_summary_read}, +}; +#define MPT_IOC_PROC_ENTRIES (sizeof(mpt_ioc_proc_list)/sizeof(mpt_ioc_proc_list[0])) + +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* 20000207 -sralston * GRRRRR... 
IOSpace (port i/o) register access (for the 909) is back! * 20000517 -sralston @@ -225,9 +280,18 @@ writel(v, a); } +static inline void CHIPREG_PIO_WRITE32(volatile u32 *a, u32 v) +{ + outl(v, (unsigned long)a); +} + +static inline u32 CHIPREG_PIO_READ32(volatile u32 *a) +{ + return inl((unsigned long)a); +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. * @irq: irq number (not used) * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure @@ -252,8 +316,7 @@ MPT_FRAME_HDR *mf; MPT_FRAME_HDR *mr; u32 pa; - u32 *m; - int req_idx; + int req_idx = -1; int cb_idx; int type; int freeme; @@ -262,6 +325,21 @@ ioc = bus_id; /* + * Verify ioc pointer is ok + */ + { + MPT_ADAPTER *iocCmp; + iocCmp = mpt_adapter_find_first(); + while ((ioc != iocCmp) && iocCmp) + iocCmp = mpt_adapter_find_next(iocCmp); + + if (!iocCmp) { + printk(KERN_WARNING "mpt_interrupt: Invalid ioc!\n"); + return; + } + } + + /* * Drain the reply FIFO! * * NOTES: I've seen up to 10 replies processed in this loop, so far... @@ -281,25 +359,27 @@ * Check for non-TURBO reply! */ if (pa & MPI_ADDRESS_REPLY_A_BIT) { - dma_addr_t reply_dma_addr; + u32 reply_dma_low; u16 ioc_stat; /* non-TURBO reply! Hmmm, something may be up... * Newest turbo reply mechanism; get address * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)! */ - reply_dma_addr = (pa = (pa << 1)); - /* Map DMA address of reply header to cpu address. */ - m = (u32 *) ((u8 *)ioc->reply_frames + - (reply_dma_addr - ioc->reply_frames_dma)); + /* Map DMA address of reply header to cpu address. 
+ * pa is 32 bits - but the dma address may be 32 or 64 bits + * get offset based only only the low addresses + */ + reply_dma_low = (pa = (pa << 1)); + mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames + + (reply_dma_low - ioc->reply_frames_low_dma)); - mr = (MPT_FRAME_HDR *) m; req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx); cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; mf = MPT_INDEX_2_MFPTR(ioc, req_idx); - dprintk((KERN_INFO MYNAM ": %s: Got non-TURBO reply=%p\n", + dprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p\n", ioc->name, mr)); DBG_DUMP_REPLY_FRAME(mr) @@ -307,7 +387,7 @@ * Check/log IOC log info */ ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus); - if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo); if ((int)ioc->chip_type <= (int)FC929) mpt_fc_log_info(ioc, log_info); @@ -318,7 +398,7 @@ /* * Process turbo (context) reply... */ - dirqprintk((KERN_INFO MYNAM ": %s: Got TURBO reply(=%08x)\n", ioc->name, pa)); + dirqprintk((MYIOC_s_INFO_FMT "Got TURBO reply(=%08x)\n", ioc->name, pa)); type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT); if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) { cb_idx = mpt_stm_index; @@ -357,6 +437,34 @@ pa = 0; /* No reply flush! */ } + if ((int)ioc->chip_type > (int)FC929) { + /* Verify mf, mf are reasonable. 
+ */ + if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth)) + || (mf < ioc->req_frames)) ) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, mf, req_idx); + cb_idx = 0; + pa = 0; + freeme = 0; + } + if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth)) + || (mr < ioc->reply_frames)) ) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, mr); + cb_idx = 0; + pa = 0; + freeme = 0; + } + if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx); + cb_idx = 0; + pa = 0; + freeme = 0; + } + } + /* Check for (valid) IO callback! */ if (cb_idx) { /* Do the callback! */ @@ -374,15 +482,18 @@ /* Put Request back on FreeQ! */ spin_lock_irqsave(&ioc->FreeQlock, flags); Q_ADD_TAIL(&ioc->FreeQ, &mf->u.frame.linkage, MPT_FRAME_HDR); +#ifdef MFCNT + ioc->mfcnt--; +#endif spin_unlock_irqrestore(&ioc->FreeQlock, flags); } count++; - dirqprintk((KERN_INFO MYNAM ": %s: ISR processed frame #%d\n", ioc->name, count)); + dirqprintk((MYIOC_s_INFO_FMT "ISR processed frame #%d\n", ioc->name, count)); mb(); if (count >= MPT_MAX_REPLIES_PER_ISR) { - dirqprintk((KERN_INFO MYNAM ": %s: ISR processed %d replies.", + dirqprintk((MYIOC_s_INFO_FMT "ISR processed %d replies.", ioc->name, count)); dirqprintk((" Giving this ISR a break!\n")); return; @@ -409,17 +520,17 @@ int freereq = 1; u8 func; - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply() called\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name)); if ((mf == NULL) || (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": %s: ERROR - NULL or BAD request frame ptr! (=%p)\n", + printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n", ioc->name, mf); return 1; } if (reply == NULL) { - dprintk((KERN_ERR MYNAM ": %s: ERROR - Unexpected NULL Event (turbo?) reply!\n", + dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) 
reply!\n", ioc->name)); return 1; } @@ -430,7 +541,7 @@ } func = reply->u.hdr.Function; - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply, Function=%02Xh\n", + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n", ioc->name, func)); if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { @@ -441,30 +552,77 @@ results = ProcessEventNotification(ioc, pEvReply, &evHandlers); if (results != evHandlers) { /* CHECKME! Any special handling needed here? */ - dprintk((KERN_WARNING MYNAM ": %s: Hmmm... Called %d event handlers, sum results = %d\n", + dprintk((MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n", ioc->name, evHandlers, results)); } /* - * Hmmm... It seems that EventNotificationReply is an exception - * to the rule of one reply per request. + * Hmmm... It seems that EventNotificationReply is an exception + * to the rule of one reply per request. */ if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) freereq = 0; + #ifdef CONFIG_PROC_FS // LogEvent(ioc, pEvReply); #endif + } else if (func == MPI_FUNCTION_EVENT_ACK) { - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply, EventAck reply received\n", + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", ioc->name)); + } else if (func == MPI_FUNCTION_CONFIG) { + CONFIGPARMS *pCfg; + unsigned long flags; + + dprintk((MYIOC_s_INFO_FMT "config_complete (mf=%p,mr=%p)\n", + ioc->name, mf, reply)); + + pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *))); + + if (pCfg) { + /* disable timer and remove from linked list */ + del_timer(&pCfg->timer); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_DEL_ITEM(&pCfg->linkage); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + /* + * If IOC Status is SUCCESS, save the header + * and set the status code to GOOD. 
+ */ + pCfg->status = MPT_CONFIG_ERROR; + if (reply) { + ConfigReply_t *pReply = (ConfigReply_t *)reply; + u16 status; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + dprintk((KERN_NOTICE " IOCStatus=%04xh, IOCLogInfo=%08xh\n", + status, le32_to_cpu(pReply->IOCLogInfo))); + + pCfg->status = status; + if (status == MPI_IOCSTATUS_SUCCESS) { + pCfg->hdr->PageVersion = pReply->Header.PageVersion; + pCfg->hdr->PageLength = pReply->Header.PageLength; + pCfg->hdr->PageNumber = pReply->Header.PageNumber; + pCfg->hdr->PageType = pReply->Header.PageType; + } + } + + /* + * Wake up the original calling thread + */ + pCfg->wait_done = 1; + wake_up(&mpt_waitq); + } } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Unexpected msg function (=%02Xh) reply received!\n", + printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", ioc->name, func); } /* - * Conditionally tell caller to free the original - * EventNotification/EventAck/unexpected request frame! + * Conditionally tell caller to free the original + * EventNotification/EventAck/unexpected request frame! */ return freereq; } @@ -480,21 +638,22 @@ * protocol-specific driver must do this before it will be able to * use any IOC resources, such as obtaining request frames. * - * NOTES: The SCSI protocol driver currently calls this routine twice - * in order to register separate callbacks; one for "normal" SCSI IO - * and another for MptScsiTaskMgmt requests. + * NOTES: The SCSI protocol driver currently calls this routine thrice + * in order to register separate callbacks; one for "normal" SCSI IO; + * one for MptScsiTaskMgmt requests; one for Scan/DV requests. * * Returns a positive integer valued "handle" in the - * range (and S.O.D. order) {7,6,...,1} if successful. + * range (and S.O.D. order) {N,...,7,6,5,...,1} if successful. * Any non-positive return value (including zero!) should be considered * an error by the caller. 
*/ int mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass) { - int r = -1; int i; + last_drv_idx = -1; + #ifndef MODULE /* * Handle possibility of the mptscsih_detect() routine getting @@ -512,7 +671,7 @@ #endif /* - * Search for empty callback slot in this order: {7,6,...,1} + * Search for empty callback slot in this order: {N,...,7,6,5,...,1} * (slot/handle 0 is reserved!) */ for (i = MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { @@ -520,7 +679,7 @@ MptCallbacks[i] = cbfunc; MptDriverClass[i] = dclass; MptEvHandlers[i] = NULL; - r = i; + last_drv_idx = i; if (cbfunc != mpt_base_reply) { MOD_INC_USE_COUNT; } @@ -528,7 +687,7 @@ } } - return r; + return last_drv_idx; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -546,6 +705,11 @@ MptCallbacks[cb_idx] = NULL; MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER; MptEvHandlers[cb_idx] = NULL; + + last_drv_idx++; + if (isense_idx != -1 && isense_idx <= cb_idx) + isense_idx++; + if (cb_idx != mpt_base_index) { MOD_DEC_USE_COUNT; } @@ -639,7 +803,8 @@ * @handle: Handle of registered MPT protocol driver * @iocid: IOC unique identifier (integer) * - * Returns pointer to a MPT request frame or %NULL if none are available. + * Returns pointer to a MPT request frame or %NULL if none are available + * or IOC is not active. */ MPT_FRAME_HDR* mpt_get_msg_frame(int handle, int iocid) @@ -650,6 +815,16 @@ /* validate handle and ioc identifier */ iocp = mpt_adapters[iocid]; + +#ifdef MFCNT + if (!iocp->active) + printk(KERN_WARNING "IOC Not Active! mpt_get_msg_frame returning NULL!\n"); +#endif + + /* If interrupts are not attached, do not return a request frame */ + if (!iocp->active) + return NULL; + spin_lock_irqsave(&iocp->FreeQlock, flags); if (! 
Q_IS_EMPTY(&iocp->FreeQ)) { int req_offset; @@ -662,8 +837,20 @@ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_offset / iocp->req_sz); mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0; +#ifdef MFCNT + iocp->mfcnt++; +#endif } spin_unlock_irqrestore(&iocp->FreeQlock, flags); + +#ifdef MFCNT + if (mf == NULL) + printk(KERN_WARNING "IOC Active. No free Msg Frames! Count 0x%x Max 0x%x\n", iocp->mfcnt, iocp->req_depth); + mfcounter++; + if (mfcounter == PRINT_MF_COUNT) + printk(KERN_INFO "MF Count 0x%x Max 0x%x \n", iocp->mfcnt, iocp->req_depth); +#endif + dmfprintk((KERN_INFO MYNAM ": %s: mpt_get_msg_frame(%d,%d), got mf=%p\n", iocp->name, handle, iocid, mf)); return mf; @@ -687,7 +874,7 @@ iocp = mpt_adapters[iocid]; if (iocp != NULL) { - dma_addr_t mf_dma_addr; + u32 mf_dma_addr; int req_offset; /* ensure values are reset properly! */ @@ -700,23 +887,23 @@ #ifdef MPT_DEBUG_MSG_FRAME { u32 *m = mf->u.frame.hwhdr.__hdr; - int i, n; + int ii, n; printk(KERN_INFO MYNAM ": %s: About to Put msg frame @ %p:\n" KERN_INFO " ", iocp->name, m); n = iocp->req_sz/4 - 1; while (m[n] == 0) n--; - for (i=0; i<=n; i++) { - if (i && ((i%8)==0)) + for (ii=0; ii<=n; ii++) { + if (ii && ((ii%8)==0)) printk("\n" KERN_INFO " "); - printk(" %08x", le32_to_cpu(m[i])); + printk(" %08x", le32_to_cpu(m[ii])); } printk("\n"); } #endif - mf_dma_addr = iocp->req_frames_dma + req_offset; + mf_dma_addr = iocp->req_frames_low_dma + req_offset; CHIPREG_WRITE32(&iocp->chip->RequestFifo, mf_dma_addr); } } @@ -742,6 +929,9 @@ /* Put Request back on FreeQ! */ spin_lock_irqsave(&iocp->FreeQlock, flags); Q_ADD_TAIL(&iocp->FreeQ, &mf->u.frame.linkage, MPT_FRAME_HDR); +#ifdef MFCNT + iocp->mfcnt--; +#endif spin_unlock_irqrestore(&iocp->FreeQlock, flags); } } @@ -754,8 +944,9 @@ * @iocid: IOC unique identifier (integer) * @reqBytes: Size of the request in bytes * @req: Pointer to MPT request frame + * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. 
* - * This routine is used exclusively by mptscsih to send MptScsiTaskMgmt + * This routine is used exclusively to send MptScsiTaskMgmt * requests since they are required to be sent via doorbell handshake. * * NOTE: It is the callers responsibility to byte-swap fields in the @@ -764,41 +955,30 @@ * Returns 0 for success, non-zero for failure. */ int -mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req) +mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req, int sleepFlag) { MPT_ADAPTER *iocp; int r = 0; iocp = mpt_adapters[iocid]; if (iocp != NULL) { - u8 *req_as_bytes; - u32 ioc_raw_state; - int i; - - /* YIKES! We already know something is amiss. - * Do upfront check on IOC state. - */ - ioc_raw_state = GetIocState(iocp, 0); - if ((ioc_raw_state & MPI_DOORBELL_ACTIVE) || - ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL)) { - printk(KERN_WARNING MYNAM ": %s: Bad IOC state (%08x) WARNING!\n", - iocp->name, ioc_raw_state); - if ((r = mpt_do_ioc_recovery(iocp, MPT_HOSTEVENT_IOC_RECOVER)) != 0) { - printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n", - r, iocp->name); - return r; - } - } + u8 *req_as_bytes; + int ii; + + /* State is known to be good upon entering + * this function so issue the bus reset + * request. + */ /* * Emulate what mpt_put_msg_frame() does /wrt to sanity * setting cb_idx/req_idx. But ONLY if this request * is in proper (pre-alloc'd) request buffer range... 
*/ - i = MFPTR_2_MPT_INDEX(iocp,(MPT_FRAME_HDR*)req); - if (reqBytes >= 12 && i >= 0 && i < iocp->req_depth) { + ii = MFPTR_2_MPT_INDEX(iocp,(MPT_FRAME_HDR*)req); + if (reqBytes >= 12 && ii >= 0 && ii < iocp->req_depth) { MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req; - mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(i); + mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii); mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; } @@ -810,36 +990,40 @@ ((reqBytes/4)<chip->Doorbell) & MPI_DOORBELL_ACTIVE)) + return -5; + dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n", - iocp->name, i)); + iocp->name, ii)); CHIPREG_WRITE32(&iocp->chip->IntStatus, 0); - if ((r = WaitForDoorbellAck(iocp, 1)) < 0) { + if ((r = WaitForDoorbellAck(iocp, 1, sleepFlag)) < 0) { return -2; } /* Send request via doorbell handshake */ req_as_bytes = (u8 *) req; - for (i = 0; i < reqBytes/4; i++) { + for (ii = 0; ii < reqBytes/4; ii++) { u32 word; - word = ((req_as_bytes[(i*4) + 0] << 0) | - (req_as_bytes[(i*4) + 1] << 8) | - (req_as_bytes[(i*4) + 2] << 16) | - (req_as_bytes[(i*4) + 3] << 24)); + word = ((req_as_bytes[(ii*4) + 0] << 0) | + (req_as_bytes[(ii*4) + 1] << 8) | + (req_as_bytes[(ii*4) + 2] << 16) | + (req_as_bytes[(ii*4) + 3] << 24)); CHIPREG_WRITE32(&iocp->chip->Doorbell, word); - if ((r = WaitForDoorbellAck(iocp, 1)) < 0) { + if ((r = WaitForDoorbellAck(iocp, 1, sleepFlag)) < 0) { r = -3; break; } } - if ((r = WaitForDoorbellInt(iocp, 2)) >= 0) + if ((r = WaitForDoorbellInt(iocp, 10, sleepFlag)) >= 0) r = 0; else r = -4; @@ -871,8 +1055,8 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_adapter_find_next - Find next MPT adapter pointer. - * @prev: Pointer to previous MPT adapter + * mpt_adapter_find_next - Find next MPT adapter pointer. + * @prev: Pointer to previous MPT adapter * * Returns next MPT adapter pointer or %NULL if there are no more. 
*/ @@ -888,13 +1072,13 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_pci_scan - Scan PCI devices for MPT adapters. * * Returns count of MPT adapters found, keying off of PCI vendor and * device_id's. */ -int __init +static int __init mpt_pci_scan(void) { struct pci_dev *pdev; @@ -906,7 +1090,7 @@ dprintk((KERN_INFO MYNAM ": Checking for MPT adapters...\n")); /* - * NOTE: The 929 (I believe) will appear as 2 separate PCI devices, + * NOTE: The 929 and 1030 will appear as 2 separate PCI devices, * one for each channel. */ pci_for_each_dev(pdev) { @@ -917,9 +1101,9 @@ if ((pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC909) && (pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC929) && (pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC919) && + (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030) && #if 0 /* FIXME! C103x family */ - (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030) && (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030_ZC) && (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1035) && #endif @@ -929,7 +1113,7 @@ } /* GRRRRR - * 929 dual function devices may be presented in Func 1,0 order, + * dual function devices (929, 1030) may be presented in Func 1,0 order, * but we'd really really rather have them in Func 0,1 order. * Do some kind of look ahead here... 
*/ @@ -937,11 +1121,11 @@ pdev2 = pci_peek_next_dev(pdev); if (pdev2 && (pdev2->vendor == 0x1000) && (PCI_SLOT(pdev2->devfn) == PCI_SLOT(pdev->devfn)) && - (pdev2->device == MPI_MANUFACTPAGE_DEVICEID_FC929) && + (pdev2->device == pdev->device) && (pdev2->bus->number == pdev->bus->number) && !(pdev2->devfn & 1)) { dprintk((KERN_INFO MYNAM ": MPT adapter found: PCI bus/dfn=%02x/%02xh, class=%08x, id=%xh\n", - pdev2->bus->number, pdev2->devfn, pdev2->class, pdev2->device)); + pdev2->bus->number, pdev2->devfn, pdev2->class, pdev2->device)); found++; if ((r = mpt_adapter_install(pdev2)) == 0) count++; @@ -969,9 +1153,7 @@ } #ifdef CONFIG_PROC_FS - if (procmpt_create() != 0) - printk(KERN_WARNING MYNAM ": WARNING! - %s creation failed!\n", - MPT_PROCFS_MPTBASEDIR); + (void) procmpt_create(); #endif return count; @@ -1004,7 +1186,7 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_adapter_install - Install a PCI intelligent MPT adapter. * @pdev: Pointer to pci_dev structure * @@ -1030,7 +1212,7 @@ unsigned long port; u32 msize; u32 psize; - int i; + int ii; int r = -ENODEV; int len; @@ -1040,41 +1222,68 @@ return -ENOMEM; } memset(ioc, 0, sizeof(*ioc)); - ioc->req_sz = MPT_REQ_SIZE; /* avoid div by zero! */ ioc->alloc_total = sizeof(MPT_ADAPTER); + ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ + ioc->reply_sz = ioc->req_sz; ioc->pcidev = pdev; + ioc->diagPending = 0; + spin_lock_init(&ioc->diagLock); + + /* Initialize the event logging. + */ + ioc->eventTypes = 0; /* None */ + ioc->eventContext = 0; + ioc->eventLogSize = 0; + ioc->events = NULL; + +#ifdef MFCNT + ioc->mfcnt = 0; +#endif + + /* Initialize the FW and Data image pointers. + */ + ioc->FWImage = NULL; + ioc->FWImage_dma = 0; + + /* Initilize SCSI Config Data structure + */ + memset(&ioc->spi_data, 0, sizeof(ScsiCfgData)); + + /* Initialize the running configQ head. + */ + Q_INIT(&ioc->configQ, Q_ITEM); /* Find lookup slot. 
*/ - for (i=0; i < MPT_MAX_ADAPTERS; i++) { - if (mpt_adapters[i] == NULL) { - ioc->id = i; /* Assign adapter unique id (lookup) */ + for (ii=0; ii < MPT_MAX_ADAPTERS; ii++) { + if (mpt_adapters[ii] == NULL) { + ioc->id = ii; /* Assign adapter unique id (lookup) */ break; } } - if (i == MPT_MAX_ADAPTERS) { - printk(KERN_ERR MYNAM ": ERROR - mpt_adapters[%d] table overflow!\n", i); + if (ii == MPT_MAX_ADAPTERS) { + printk(KERN_ERR MYNAM ": ERROR - mpt_adapters[%d] table overflow!\n", ii); kfree(ioc); return -ENFILE; } mem_phys = msize = 0; port = psize = 0; - for (i=0; i < DEVICE_COUNT_RESOURCE; i++) { - if (pdev->PCI_BASEADDR_FLAGS(i) & PCI_BASE_ADDRESS_SPACE_IO) { + for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) { + if (pdev->PCI_BASEADDR_FLAGS(ii) & PCI_BASE_ADDRESS_SPACE_IO) { /* Get I/O space! */ - port = pdev->PCI_BASEADDR_START(i); - psize = PCI_BASEADDR_SIZE(pdev,i); + port = pdev->PCI_BASEADDR_START(ii); + psize = PCI_BASEADDR_SIZE(pdev,ii); } else { /* Get memmap */ - mem_phys = pdev->PCI_BASEADDR_START(i); - msize = PCI_BASEADDR_SIZE(pdev,i); + mem_phys = pdev->PCI_BASEADDR_START(ii); + msize = PCI_BASEADDR_SIZE(pdev,ii); break; } } ioc->mem_size = msize; - if (i == DEVICE_COUNT_RESOURCE) { + if (ii == DEVICE_COUNT_RESOURCE) { printk(KERN_ERR MYNAM ": ERROR - MPT adapter has no memory regions defined!\n"); kfree(ioc); return -EINVAL; @@ -1098,6 +1307,8 @@ } dprintk((KERN_INFO MYNAM ": mem = %p, mem_phys = %lx\n", mem, mem_phys)); + dprintk((KERN_INFO MYNAM ": facts @ %p, pfacts[0] @ %p\n", + &ioc->facts, &ioc->pfacts[0])); if (PortIo) { u8 *pmem = (u8*)port; ioc->mem_phys = port; @@ -1107,6 +1318,13 @@ ioc->chip = (SYSIF_REGS*)mem; } + /* Save Port IO values incase we need to do downloadboot */ + { + u8 *pmem = (u8*)port; + ioc->pio_mem_phys = port; + ioc->pio_chip = (SYSIF_REGS*)pmem; + } + ioc->chip_type = FCUNK; if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) { ioc->chip_type = FC909; @@ -1120,12 +1338,19 @@ ioc->chip_type = FC919; ioc->prod_name = 
"LSIFC919"; } -#if 0 - else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_53C1030) { + else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) { ioc->chip_type = C1030; ioc->prod_name = "LSI53C1030"; + { + /* 1030 Chip Fix. Disable Split transactions + * for PCIX. Set bits 4 - 6 to zero. + */ + u16 pcixcmd = 0; + pci_read_config_word(pdev, 0x6a, &pcixcmd); + pcixcmd &= 0xFF8F; + pci_write_config_word(pdev, 0x6a, pcixcmd); + } } -#endif myname = "iocN"; len = strlen(myname); @@ -1145,8 +1370,13 @@ r = request_irq(pdev->irq, mpt_interrupt, SA_SHIRQ, ioc->name, ioc); if (r < 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - Unable to allocate interrupt %d!\n", +#ifndef __sparc__ + printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %d!\n", ioc->name, pdev->irq); +#else + printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %s!\n", + ioc->name, __irq_itoa(pdev->irq)); +#endif iounmap(mem); kfree(ioc); return -EBUSY; @@ -1156,7 +1386,11 @@ pci_set_master(pdev); /* ?? */ +#ifndef __sparc__ dprintk((KERN_INFO MYNAM ": %s installed at interrupt %d\n", ioc->name, pdev->irq)); +#else + dprintk((KERN_INFO MYNAM ": %s installed at interrupt %s\n", ioc->name, __irq_itoa(pdev->irq))); +#endif } /* tack onto tail of our MPT adapter list */ @@ -1166,12 +1400,12 @@ mpt_adapters[ioc->id] = ioc; /* NEW! 20010220 -sralston - * Check for "929 bound ports" to reduce redundant resets. + * Check for "bound ports" (929, 1030) to reduce redundant resets. */ - if (ioc->chip_type == FC929) - mpt_detect_929_bound_ports(ioc, pdev); + if ((ioc->chip_type == FC929) || (ioc->chip_type == C1030)) + mpt_detect_bound_ports(ioc, pdev); - if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP)) != 0) { + if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) { printk(KERN_WARNING MYNAM ": WARNING - %s did not initialize properly! 
(%d)\n", ioc->name, r); } @@ -1180,10 +1414,11 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_do_ioc_recovery - Initialize or recover MPT adapter. * @ioc: Pointer to MPT adapter structure * @reason: Event word / reason + * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. * * This routine performs all the steps necessary to bring the IOC * to a OPERATIONAL state. @@ -1191,16 +1426,21 @@ * This routine also pre-fetches the LAN MAC address of a Fibre Channel * MPT adapter. * - * Returns 0 for success. + * Returns: + * 0 for success + * -1 if failed to get board READY + * -2 if READY but IOCFacts Failed + * -3 if READY but PrimeIOCFifos Failed + * -4 if READY but IOCInit Failed */ static int -mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason) +mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) { int hard_reset_done = 0; int alt_ioc_ready = 0; int hard; int r; - int i; + int ii; int handlers; printk(KERN_INFO MYNAM ": Initiating %s %s\n", @@ -1211,156 +1451,106 @@ ioc->active = 0; /* NOTE: Access to IOC's request FreeQ is now blocked! */ -// FIXME? Cleanup all IOC requests here! (or below?) -// But watch out for event associated request? + if (ioc->alt_ioc) { + /* Disable alt-IOC's reply interrupts for a bit ... */ + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); + ioc->alt_ioc->active = 0; + /* NOTE: Access to alt-IOC's request FreeQ is now blocked! */ + } - hard = HardReset; - if (ioc->alt_ioc && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) + hard = 1; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) hard = 0; - if ((hard_reset_done = MakeIocReady(ioc, hard)) < 0) { + if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) { printk(KERN_WARNING MYNAM ": %s NOT READY WARNING!\n", ioc->name); return -1; } -// NEW! -#if 0 // Kiss-of-death!?! - if (ioc->alt_ioc) { -// Grrr... Hold off any alt-IOC interrupts (and events) while -// handshaking to IOC, needed because? 
- /* Disable alt-IOC's reply interrupts for a bit ... */ - alt_ioc_intmask = CHIPREG_READ32(&ioc->alt_ioc->chip->IntMask); - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); - ioc->alt_ioc->active = 0; - /* NOTE: Access to alt-IOC's request FreeQ is now blocked! */ - } -#endif - + /* hard_reset_done = 0 if a soft reset was performed + * and 1 if a hard reset was performed. + */ if (hard_reset_done && ioc->alt_ioc) { - if ((r = MakeIocReady(ioc->alt_ioc, 0)) == 0) + if ((r = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) alt_ioc_ready = 1; else - printk(KERN_WARNING MYNAM ": alt-%s: (%d) Not ready WARNING!\n", + printk(KERN_WARNING MYNAM + ": alt-%s: (%d) Not ready WARNING!\n", ioc->alt_ioc->name, r); } + /* Get IOC facts! */ + if ((r = GetIocFacts(ioc, sleepFlag, reason)) != 0) + return -2; if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { - /* Get IOC facts! */ - if ((r = GetIocFacts(ioc)) != 0) - return -2; MptDisplayIocCapabilities(ioc); } - /* - * Call each currently registered protocol IOC reset handler - * with pre-reset indication. - * NOTE: If we're doing _IOC_BRINGUP, there can be no - * MptResetHandlers[] registered yet. - */ - if (hard_reset_done) { - r = handlers = 0; - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptResetHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Calling IOC pre_reset handler #%d\n", - ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc, MPT_IOC_PRE_RESET); - handlers++; - - if (alt_ioc_ready) { - dprintk((KERN_INFO MYNAM ": %s: Calling alt-IOC pre_reset handler #%d\n", - ioc->alt_ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); - handlers++; - } - } - } - /* FIXME? Examine results here? */ - } - - // May need to check/upload firmware & data here! - - if ((r = SendIocInit(ioc)) != 0) - return -3; -// NEW! 
if (alt_ioc_ready) { - if ((r = SendIocInit(ioc->alt_ioc)) != 0) { - alt_ioc_ready = 0; - printk(KERN_WARNING MYNAM ": alt-%s: (%d) init failure WARNING!\n", - ioc->alt_ioc->name, r); - } - } - - /* - * Call each currently registered protocol IOC reset handler - * with post-reset indication. - * NOTE: If we're doing _IOC_BRINGUP, there can be no - * MptResetHandlers[] registered yet. - */ - if (hard_reset_done) { - r = handlers = 0; - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptResetHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Calling IOC post_reset handler #%d\n", - ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc, MPT_IOC_POST_RESET); - handlers++; - - if (alt_ioc_ready) { - dprintk((KERN_INFO MYNAM ": %s: Calling alt-IOC post_reset handler #%d\n", - ioc->alt_ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc->alt_ioc, MPT_IOC_POST_RESET); - handlers++; - } - } + if ((r = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) + return -2; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + MptDisplayIocCapabilities(ioc->alt_ioc); } - /* FIXME? Examine results here? */ } /* * Prime reply & request queues! - * (mucho alloc's) + * (mucho alloc's) Must be done prior to + * init as upper addresses are needed for init. */ if ((r = PrimeIocFifos(ioc)) != 0) + return -3; + + // May need to check/upload firmware & data here! + if ((r = SendIocInit(ioc, sleepFlag)) != 0) return -4; // NEW! if (alt_ioc_ready && ((r = PrimeIocFifos(ioc->alt_ioc)) != 0)) { printk(KERN_WARNING MYNAM ": alt-%s: (%d) FIFO mgmt alloc WARNING!\n", ioc->alt_ioc->name, r); + alt_ioc_ready = 0; } -// FIXME! Cleanup all IOC (and alt-IOC?) requests here! 
+ if (alt_ioc_ready) { + if ((r = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { + alt_ioc_ready = 0; + printk(KERN_WARNING MYNAM + ": alt-%s: (%d) init failure WARNING!\n", + ioc->alt_ioc->name, r); + } + } - if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && - (ioc->lan_cnfg_page0.Header.PageLength == 0)) { - /* - * Pre-fetch the ports LAN MAC address! - * (LANPage1_t stuff) - */ - (void) GetLanConfigPages(ioc); -#ifdef MPT_DEBUG - { - u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; - dprintk((KERN_INFO MYNAM ": %s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", - ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] )); + if (reason == MPT_HOSTEVENT_IOC_BRINGUP){ + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + dprintk((MYIOC_s_INFO_FMT + "firmware upload required!\n", ioc->name)); + + r = mpt_do_upload(ioc, sleepFlag); + if (r != 0) + printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); + /* Handle the alt IOC too */ + if (alt_ioc_ready){ + r = mpt_do_upload(ioc->alt_ioc, sleepFlag); + if (r != 0) + printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); + } } -#endif } + /* Enable! (reply interrupt) */ CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM)); ioc->active = 1; -// NEW! -#if 0 // Kiss-of-death!?! - if (alt_ioc_ready && (r==0)) { + if (ioc->alt_ioc) { /* (re)Enable alt-IOC! (reply interrupt) */ dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n", ioc->alt_ioc->name)); CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM)); ioc->alt_ioc->active = 1; } -#endif /* NEW! 20010120 -sralston * Enable MPT base driver management of EventNotification @@ -1368,19 +1558,95 @@ */ if (!ioc->facts.EventState) (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */ -// NEW! -// FIXME!?! 
-// if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) { -// (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ -// } + + if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) + (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ + + /* (Bugzilla:fibrebugs, #513) + * Bug fix (part 2)! 20010905 -sralston + * Add additional "reason" check before call to GetLanConfigPages + * (combined with GetIoUnitPage2 call). This prevents a somewhat + * recursive scenario; GetLanConfigPages times out, timer expired + * routine calls HardResetHandler, which calls into here again, + * and we try GetLanConfigPages again... + */ + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + if ((int)ioc->chip_type <= (int)FC929) { + /* + * Pre-fetch FC port WWN and stuff... + * (FCPortPage0_t stuff) + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) GetFcPortPage0(ioc, ii); + } + + if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && + (ioc->lan_cnfg_page0.Header.PageLength == 0)) { + /* + * Pre-fetch the ports LAN MAC address! + * (LANPage1_t stuff) + */ + (void) GetLanConfigPages(ioc); +#ifdef MPT_DEBUG + { + u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; + dprintk((MYIOC_s_INFO_FMT "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] )); + } +#endif + } + } else { + /* Get NVRAM and adapter maximums from SPP 0 and 2 + */ + mpt_GetScsiPortSettings(ioc, 0); + + /* Get version and length of SDP 1 + */ + mpt_readScsiDevicePageHeaders(ioc, 0); + + /* Find IM volumes + */ + if (ioc->facts.MsgVersion >= 0x0102) + mpt_findImVolumes(ioc); + } + + GetIoUnitPage2(ioc); + } + + /* + * Call each currently registered protocol IOC reset handler + * with post-reset indication. + * NOTE: If we're doing _IOC_BRINGUP, there can be no + * MptResetHandlers[] registered yet. 
+ */ + if (hard_reset_done) { + r = handlers = 0; + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptResetHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n", + ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET); + handlers++; + + if (alt_ioc_ready) { + dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", + ioc->name, ioc->alt_ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); + handlers++; + } + } + } + /* FIXME? Examine results here? */ + } return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * mpt_detect_929_bound_ports - Search for PCI bus/dev_function - * which matches PCI bus/dev_function (+/-1) for newly discovered 929. + * mpt_detect_bound_ports - Search for PCI bus/dev_function + * which matches PCI bus/dev_function (+/-1) for newly discovered 929 + * or 1030. * @ioc: Pointer to MPT adapter structure * @pdev: Pointer to (struct pci_dev) structure * @@ -1388,22 +1654,22 @@ * using alt_ioc pointer fields in their %MPT_ADAPTER structures. 
*/ static void -mpt_detect_929_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) +mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) { MPT_ADAPTER *ioc_srch = mpt_adapter_find_first(); unsigned int match_lo, match_hi; match_lo = pdev->devfn-1; match_hi = pdev->devfn+1; - dprintk((KERN_INFO MYNAM ": %s: PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n", + dprintk((MYIOC_s_INFO_FMT "PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n", ioc->name, pdev->bus->number, pdev->devfn, match_lo, match_hi)); while (ioc_srch != NULL) { struct pci_dev *_pcidev = ioc_srch->pcidev; - if ( (_pcidev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) && - (_pcidev->bus->number == pdev->bus->number) && - (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) { + if ((_pcidev->device == pdev->device) && + (_pcidev->bus->number == pdev->bus->number) && + (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) { /* Paranoia checks */ if (ioc->alt_ioc != NULL) { printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n", @@ -1418,8 +1684,6 @@ ioc->name, ioc_srch->name)); ioc_srch->alt_ioc = ioc; ioc->alt_ioc = ioc_srch; - ioc->sod_reset = ioc->alt_ioc->sod_reset; - ioc->last_kickstart = ioc->alt_ioc->last_kickstart; break; } ioc_srch = mpt_adapter_find_next(ioc_srch); @@ -1440,10 +1704,10 @@ u32 state; /* Disable the FW */ - state = GetIocState(this, 1); + state = mpt_GetIocState(this, 1); if (state == MPI_IOC_STATE_OPERATIONAL) { - if (SendIocReset(this, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET) != 0) - (void) KickStart(this, 1); + if (SendIocReset(this, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, NO_SLEEP) != 0) + (void) KickStart(this, 1, NO_SLEEP); } /* Disable adapter interrupts! 
*/ @@ -1475,12 +1739,37 @@ } if (freeup && this->sense_buf_pool != NULL) { - sz = (this->req_depth * 256); + sz = (this->req_depth * MPT_SENSE_BUFFER_ALLOC); pci_free_consistent(this->pcidev, sz, this->sense_buf_pool, this->sense_buf_pool_dma); this->sense_buf_pool = NULL; this->alloc_total -= sz; } + + if (freeup && this->events != NULL){ + sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); + kfree(this->events); + this->events = NULL; + this->alloc_total -= sz; + } + + if (freeup && this->FWImage != NULL) { + sz = this->facts.FWImageSize; + pci_free_consistent(this->pcidev, sz, + this->FWImage, this->FWImage_dma); + this->FWImage = NULL; + this->alloc_total -= sz; + } + + if (freeup && this->spi_data.nvram != NULL) { + kfree(this->spi_data.nvram); + this->spi_data.nvram = NULL; + } + + if (freeup && this->spi_data.pIocPg3 != NULL) { + kfree(this->spi_data.pIocPg3); + this->spi_data.pIocPg3 = NULL; + } } } @@ -1575,23 +1864,30 @@ /* * MakeIocReady - Get IOC to a READY state, using KickStart if needed. * @ioc: Pointer to MPT_ADAPTER structure - * @kick: Force hard KickStart of IOC + * @force: Force hard KickStart of IOC + * @sleepFlag: Specifies whether the process can sleep * - * Returns 0 for already-READY, 1 for hard reset success, - * else negative for failure. 
+ * Returns: + * 1 - DIAG reset and READY + * 0 - READY initially OR soft reset and READY + * -1 - Any failure on KickStart + * -2 - Msg Unit Reset Failed + * -3 - IO Unit Reset Failed + * -4 - IOC owned by a PEER */ static int -MakeIocReady(MPT_ADAPTER *ioc, int force) +MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) { u32 ioc_state; int statefault = 0; - int cntdn; + int cntdn; int hard_reset_done = 0; int r; - int i; + int ii; + int whoinit; /* Get current [raw] IOC state */ - ioc_state = GetIocState(ioc, 0); + ioc_state = mpt_GetIocState(ioc, 0); dhsprintk((KERN_INFO MYNAM "::MakeIocReady, %s [raw] state=%08x\n", ioc->name, ioc_state)); /* @@ -1600,7 +1896,7 @@ */ if (ioc_state & MPI_DOORBELL_ACTIVE) { statefault = 1; - printk(KERN_WARNING MYNAM ": %s: Uh-oh, unexpected doorbell active!\n", + printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n", ioc->name); } @@ -1613,7 +1909,7 @@ */ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { statefault = 2; - printk(KERN_WARNING MYNAM ": %s: Uh-oh, IOC is in FAULT state!!!\n", + printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n", ioc->name); printk(KERN_WARNING " FAULT code = %04xh\n", ioc_state & MPI_DOORBELL_DATA_MASK); @@ -1623,28 +1919,49 @@ * Hmmm... Did it get left operational? */ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) { - statefault = 3; - dprintk((KERN_WARNING MYNAM ": %s: Hmmm... IOC operational unexpected\n", + dprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n", ioc->name)); + + /* Check WhoInit. + * If PCI Peer, exit. 
+ * Else, if no fault conditions are present, issue a MessageUnitReset + * Else, fall through to KickStart case + */ + whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT; + dprintk((KERN_WARNING MYNAM + ": whoinit 0x%x\n statefault %d force %d\n", + whoinit, statefault, force)); + if (whoinit == MPI_WHOINIT_PCI_PEER) + return -4; + else { + if ((statefault == 0 ) && (force == 0)) { + if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0) + return 0; + } + statefault = 3; + } } - hard_reset_done = KickStart(ioc, statefault||force); + hard_reset_done = KickStart(ioc, statefault||force, sleepFlag); if (hard_reset_done < 0) return -1; /* * Loop here waiting for IOC to come READY. */ - i = 0; + ii = 0; cntdn = HZ * 15; - while ((ioc_state = GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { + if (sleepFlag != CAN_SLEEP) + cntdn *= 10; /* 1500 iterations @ 1msec per */ + + while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { /* * BIOS or previous driver load left IOC in OP state. * Reset messaging FIFOs. */ - if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)) != 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - IOC msg unit reset failed!\n", ioc->name); + if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) { + printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name); return -2; } } else if (ioc_state == MPI_IOC_STATE_RESET) { @@ -1652,25 +1969,30 @@ * Something is wrong. Try to get IOC back * to a known state. 
*/ - if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET)) != 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - IO unit reset failed!\n", ioc->name); + if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) { + printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name); return -3; } } - i++; cntdn--; + ii++; cntdn--; if (!cntdn) { - printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_READY state timeout(%d)!\n", - ioc->name, (i+5)/HZ); + printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", + ioc->name, (ii+5)/HZ); return -ETIME; } - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (1); /* 1 msec delay */ + } + } if (statefault < 3) { - printk(KERN_WARNING MYNAM ": %s: Whew! Recovered from %s\n", + printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name, statefault==1 ? "stuck handshake" : "IOC FAULT"); } @@ -1680,21 +2002,21 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * GetIocState - Get the current state of a MPT adapter. + * mpt_GetIocState - Get the current state of a MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @cooked: Request raw or cooked IOC state * * Returns all IOC Doorbell register bits if cooked==0, else just the * Doorbell bits in MPI_IOC_STATE_MASK. */ -static u32 -GetIocState(MPT_ADAPTER *ioc, int cooked) +u32 +mpt_GetIocState(MPT_ADAPTER *ioc, int cooked) { u32 s, sc; /* Get! */ s = CHIPREG_READ32(&ioc->chip->Doorbell); - dprintk((KERN_INFO MYNAM ": %s: raw state = %08x\n", ioc->name, s)); +// dprintk((MYIOC_s_INFO_FMT "raw state = %08x\n", ioc->name, s)); sc = s & MPI_IOC_STATE_MASK; /* Save! */ @@ -1707,11 +2029,13 @@ /* * GetIocFacts - Send IOCFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep + * @reason: If recovery, only update facts. 
* * Returns 0 for success, non-zero for failure. */ static int -GetIocFacts(MPT_ADAPTER *ioc) +GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) { IOCFacts_t get_facts; IOCFactsReply_t *facts; @@ -1741,14 +2065,13 @@ get_facts.Function = MPI_FUNCTION_IOC_FACTS; /* Assert: All other get_facts fields are zero! */ - dprintk((KERN_INFO MYNAM ": %s: Sending get IocFacts request\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "Sending get IocFacts request\n", ioc->name)); /* No non-zero fields in the get_facts request are greater than * 1 byte in size, so we can just fire it off as is. */ - r = HandShakeReqAndReply(ioc, - req_sz, (u32*)&get_facts, - reply_sz, (u16*)facts, 3); + r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts, + reply_sz, (u16*)facts, 3 /*seconds*/, sleepFlag); if (r != 0) return r; @@ -1761,14 +2084,17 @@ */ /* Did we get a valid reply? */ if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) { - /* - * If not been here, done that, save off first WhoInit value - */ - if (ioc->FirstWhoInit == WHOINIT_UNKNOWN) - ioc->FirstWhoInit = facts->WhoInit; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + /* + * If not been here, done that, save off first WhoInit value + */ + if (ioc->FirstWhoInit == WHOINIT_UNKNOWN) + ioc->FirstWhoInit = facts->WhoInit; + } facts->MsgVersion = le16_to_cpu(facts->MsgVersion); facts->MsgContext = le32_to_cpu(facts->MsgContext); + facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); facts->IOCStatus = le16_to_cpu(facts->IOCStatus); facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo); status = facts->IOCStatus & MPI_IOCSTATUS_MASK; @@ -1776,7 +2102,23 @@ facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth); facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize); - facts->FWVersion = le16_to_cpu(facts->FWVersion); + + /* + * FC f/w version changed between 1.1 and 1.2 + * Old: u16{Major(4),Minor(4),SubMinor(8)} + * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} + */ + if 
(facts->MsgVersion < 0x0102) { + /* + * Handle old FC f/w style, convert to new... + */ + u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion); + facts->FWVersion.Word = + ((oldv<<12) & 0xFF000000) | + ((oldv<<8) & 0x000FFF00); + } else + facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); + facts->ProductID = le16_to_cpu(facts->ProductID); facts->CurrentHostMfaHighAddr = le32_to_cpu(facts->CurrentHostMfaHighAddr); @@ -1791,52 +2133,42 @@ * Older MPI-1.00.xx struct had 13 dwords, and enlarged * to 14 in MPI-1.01.0x. */ - if (facts->MsgLength >= sizeof(IOCFactsReply_t)/sizeof(u32) && facts->MsgVersion > 0x0100) { + if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && + facts->MsgVersion > 0x0100) { facts->FWImageSize = le32_to_cpu(facts->FWImageSize); - facts->DataImageSize = le32_to_cpu(facts->DataImageSize); } - if (facts->RequestFrameSize) { - /* - * Set values for this IOC's REQUEST queue size & depth... - */ - ioc->req_sz = MIN(MPT_REQ_SIZE, facts->RequestFrameSize * 4); - - /* - * Set values for this IOC's REPLY queue size & depth... - * - * BUG? FIX? 20000516 -nromer & sralston - * GRRR... The following did not translate well from MPI v0.09: - * ioc->reply_sz = MIN(MPT_REPLY_SIZE, facts->ReplySize * 4); - * to 0.10: - * ioc->reply_sz = MIN(MPT_REPLY_SIZE, facts->BlockSize * 4); - * Was trying to minimally optimize to smallest possible reply size - * (and greatly reduce kmalloc size). But LAN may need larger reply? - * - * So for now, just set reply size to request size. FIXME? - */ - ioc->reply_sz = ioc->req_sz; - } else { + if (!facts->RequestFrameSize) { /* Something is wrong! 
*/ - printk(KERN_ERR MYNAM ": %s: ERROR - IOC reported invalid 0 request size!\n", + printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n", ioc->name); - ioc->req_sz = MPT_REQ_SIZE; - ioc->reply_sz = MPT_REPLY_SIZE; return -55; } - ioc->req_depth = MIN(MPT_REQ_DEPTH, facts->GlobalCredits); - ioc->reply_depth = MIN(MPT_REPLY_DEPTH, facts->ReplyQueueDepth); - dprintk((KERN_INFO MYNAM ": %s: reply_sz=%3d, reply_depth=%4d\n", + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + /* + * Set values for this IOC's request & reply frame sizes, + * and request & reply queue depths... + */ + ioc->req_sz = MIN(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4); + ioc->req_depth = MIN(MPT_DEFAULT_REQ_DEPTH, facts->GlobalCredits); + ioc->reply_sz = ioc->req_sz; + ioc->reply_depth = MIN(MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth); + + /* 1030 - should we use a smaller DEFAULT_REPLY_DEPTH? + * FIX + */ + dprintk((MYIOC_s_INFO_FMT "reply_sz=%3d, reply_depth=%4d\n", ioc->name, ioc->reply_sz, ioc->reply_depth)); - dprintk((KERN_INFO MYNAM ": %s: req_sz =%3d, req_depth =%4d\n", + dprintk((MYIOC_s_INFO_FMT "req_sz =%3d, req_depth =%4d\n", ioc->name, ioc->req_sz, ioc->req_depth)); - /* Get port facts! */ - if ( (r = GetPortFacts(ioc, 0)) != 0 ) - return r; + /* Get port facts! */ + if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 ) + return r; + } } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Invalid IOC facts reply!\n", + printk(MYIOC_s_ERR_FMT "Invalid IOC facts reply!\n", ioc->name); return -66; } @@ -1849,15 +2181,16 @@ * GetPortFacts - Send PortFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number + * @sleepFlag: Specifies whether the process can sleep * * Returns 0 for success, non-zero for failure. 
*/ static int -GetPortFacts(MPT_ADAPTER *ioc, int portnum) +GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag) { PortFacts_t get_pfacts; PortFactsReply_t *pfacts; - int i; + int ii; int req_sz; int reply_sz; @@ -1883,16 +2216,16 @@ get_pfacts.PortNumber = portnum; /* Assert: All other get_pfacts fields are zero! */ - dprintk((KERN_INFO MYNAM ": %s: Sending get PortFacts(%d) request\n", + dprintk((MYIOC_s_INFO_FMT "Sending get PortFacts(%d) request\n", ioc->name, portnum)); /* No non-zero fields in the get_pfacts request are greater than * 1 byte in size, so we can just fire it off as is. */ - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&get_pfacts, - reply_sz, (u16*)pfacts, 3); - if (i != 0) - return i; + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts, + reply_sz, (u16*)pfacts, 3 /*seconds*/, sleepFlag); + if (ii != 0) + return ii; /* Did we get a valid reply? */ @@ -1914,13 +2247,14 @@ /* * SendIocInit - Send IOCInit request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep * * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state. * * Returns 0 for success, non-zero for failure. */ static int -SendIocInit(MPT_ADAPTER *ioc) +SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) { IOCInit_t ioc_init; MPIDefaultReply_t init_reply; @@ -1937,20 +2271,35 @@ ioc_init.Function = MPI_FUNCTION_IOC_INIT; /* ioc_init.Flags = 0; */ - /*ioc_init.MaxDevices = 16;*/ - ioc_init.MaxDevices = 255; -/* ioc_init.MaxBuses = 16; */ - ioc_init.MaxBuses = 1; + if ((int)ioc->chip_type <= (int)FC929) { + ioc_init.MaxDevices = MPT_MAX_FC_DEVICES; + } + else { + ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES; + } + ioc_init.MaxBuses = MPT_MAX_BUS; /* ioc_init.MsgFlags = 0; */ /* ioc_init.MsgContext = cpu_to_le32(0x00000000); */ ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ - ioc_init.HostMfaHighAddr = cpu_to_le32(0); /* Say we 32-bit! 
for now */ - dprintk((KERN_INFO MYNAM ": %s: Sending IOCInit (req @ %p)\n", ioc->name, &ioc_init)); +#ifdef __ia64__ + /* Save the upper 32-bits of the request + * (reply) and sense buffers. + */ + ioc_init.HostMfaHighAddr = cpu_to_le32((u32)(ioc->req_frames_dma >> 32)); + ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)(ioc->sense_buf_pool_dma >> 32)); +#else + /* Force 32-bit addressing */ + ioc_init.HostMfaHighAddr = cpu_to_le32(0); + ioc_init.SenseBufferHighAddr = cpu_to_le32(0); +#endif + + dprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n", + ioc->name, &ioc_init)); - r = HandShakeReqAndReply(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, - sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10); + r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, + sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); if (r != 0) return r; @@ -1958,7 +2307,7 @@ * since we don't even look at it's contents. */ - if ((r = SendPortEnable(ioc, 0)) != 0) + if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) return r; /* YIKES! SUPER IMPORTANT!!! 
@@ -1967,21 +2316,27 @@ */ count = 0; cntdn = HZ * 60; /* chg'd from 30 to 60 seconds */ - state = GetIocState(ioc, 1); + if (sleepFlag != CAN_SLEEP) + cntdn *= 10; /* scale for 1msec delays */ + state = mpt_GetIocState(ioc, 1); while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay(1); + } if (!cntdn) { - printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_OP state timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n", ioc->name, (count+5)/HZ); return -9; } - state = GetIocState(ioc, 1); + state = mpt_GetIocState(ioc, 1); count++; } - dhsprintk((KERN_INFO MYNAM ": %s: INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", ioc->name, count)); return r; @@ -1992,17 +2347,18 @@ * SendPortEnable - Send PortEnable request to MPT adapter port. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number to enable + * @sleepFlag: Specifies whether the process can sleep * * Send PortEnable to bring IOC to OPERATIONAL state. * * Returns 0 for success, non-zero for failure. 
*/ static int -SendPortEnable(MPT_ADAPTER *ioc, int portnum) +SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag) { PortEnable_t port_enable; MPIDefaultReply_t reply_buf; - int i; + int ii; int req_sz; int reply_sz; @@ -2019,13 +2375,21 @@ /* port_enable.MsgFlags = 0; */ /* port_enable.MsgContext = 0; */ - dprintk((KERN_INFO MYNAM ": %s: Sending Port(%d)Enable (req @ %p)\n", + dprintk((MYIOC_s_INFO_FMT "Sending Port(%d)Enable (req @ %p)\n", ioc->name, portnum, &port_enable)); - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&port_enable, - reply_sz, (u16*)&reply_buf, 65); - if (i != 0) - return i; + /* RAID FW may take a long time to enable + */ + if ((int)ioc->chip_type <= (int)FC929) { + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, + reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag); + } else { + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, + reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag); + } + + if (ii != 0) + return ii; /* We do not even look at the reply, so we need not * swap the multi-byte fields. @@ -2036,19 +2400,341 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* + * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port. + * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep + * + * Returns 0 for success, >0 for handshake failure + * <0 for fw upload failure. + * + * Remark: If bound IOC and a successful FWUpload was performed + * on the bound IOC, the second image is discarded + * and memory is free'd. Both channels must upload to prevent + * IOC from running in degraded mode. 
+ */ +static int +mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) +{ + u8 request[sizeof(FWUpload_t) + 24]; + u8 reply[sizeof(FWUploadReply_t)]; + FWUpload_t *prequest; + FWUploadReply_t *preply; + FWUploadTCSGE_t *ptcsge = NULL; + MptSge_t *psge; + u8 *mem; + dma_addr_t dma_addr; + int sgeoffset; + int i, sz, req_sz, reply_sz; + int cmdStatus, freeMem = 0; + + /* If the image size is 0 or if the pointer is + * not NULL (error), we are done. + */ + if (((sz = ioc->facts.FWImageSize) == 0) || ioc->FWImage) + return 0; + + /* Allocate memory + */ + mem = pci_alloc_consistent(ioc->pcidev, sz, &ioc->FWImage_dma); + if (mem == NULL) + return -1; + + memset(mem, 0, sz); + ioc->alloc_total += sz; + ioc->FWImage = mem; + dprintk((KERN_INFO MYNAM ": FW Image @ %p[%p], sz=%d bytes\n", + mem, (void *)(ulong)ioc->FWImage_dma, sz)); + + dma_addr = ioc->FWImage_dma; + + prequest = (FWUpload_t *)&request; + preply = (FWUploadReply_t *)&reply; + + /* Destination... */ + req_sz = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + + sizeof(FWUploadTCSGE_t) + sizeof(MptSge_t); + memset(prequest, 0, req_sz); + + reply_sz = sizeof(reply); + memset(preply, 0, reply_sz); + + prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; + prequest->Function = MPI_FUNCTION_FW_UPLOAD; + prequest->MsgContext = 0; /* anything */ + + ptcsge = (FWUploadTCSGE_t *) &prequest->SGL; + ptcsge->Reserved = 0; + ptcsge->ContextSize = 0; + ptcsge->DetailsLength = 12; + ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; + ptcsge->Reserved1 = 0; + ptcsge->ImageOffset = 0; + ptcsge->ImageSize = cpu_to_le32(sz); + + sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t); + psge = (MptSge_t *) &request[sgeoffset]; + psge->FlagsLength = cpu_to_le32(MPT_SGE_FLAGS_SSIMPLE_READ | (u32) sz); + + cpu_to_leXX(dma_addr, psge->Address); + + dprintk((MYIOC_s_INFO_FMT "Sending FW Upload (req @ %p)\n", + ioc->name, prequest)); + + i = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)prequest, + reply_sz, 
(u16*)preply, 65 /*seconds*/, sleepFlag); + + cmdStatus = -EFAULT; + if (i == 0) { + /* Handshake transfer was complete and successful. + * Check the Reply Frame. + */ + int status, transfer_sz; + status = le16_to_cpu(preply->IOCStatus); + if (status == MPI_IOCSTATUS_SUCCESS) { + transfer_sz = le32_to_cpu(preply->ActualImageSize); + if (transfer_sz == sz) + cmdStatus = 0; + } + } + dprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n", + ioc->name, cmdStatus)); + + /* Check to see if we have a copy of this image in + * host memory already. + */ + if (cmdStatus == 0) { + if (ioc->alt_ioc && ioc->alt_ioc->FWImage) + freeMem = 1; + } + + /* We already have a copy of this image or + * we had some type of an error - either the handshake + * failed (i != 0) or the command did not complete successfully. + */ + if (cmdStatus || freeMem) { + dprintk((MYIOC_s_INFO_FMT ": do_upload freeing %s image \n", + ioc->name, cmdStatus ? "incomplete" : "duplicate")); + + pci_free_consistent(ioc->pcidev, sz, + ioc->FWImage, ioc->FWImage_dma); + ioc->FWImage = NULL; + ioc->alloc_total -= sz; + } + + return cmdStatus; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mpt_downloadboot - DownloadBoot code + * @ioc: Pointer to MPT_ADAPTER structure + * @flag: Specify which part of IOC memory is to be uploaded. + * @sleepFlag: Specifies whether the process can sleep + * + * FwDownloadBoot requires Programmed IO access. + * + * Returns 0 for success + * -1 FW Image size is 0 + * -2 No valid FWImage Pointer + * <0 for fw upload failure. 
+ */ +static int +mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag) +{ + MpiFwHeader_t *FwHdr = NULL; + MpiExtImageHeader_t *ExtHdr; + int fw_sz; + u32 diag0val; +#ifdef MPT_DEBUG + u32 diag1val = 0; +#endif + int count = 0; + u32 *ptru32 = NULL; + u32 diagRwData; + u32 nextImage; + + dprintk((MYIOC_s_INFO_FMT "DbGb0: downloadboot entered.\n", + ioc->name)); +#ifdef MPT_DEBUG + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb1: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + dprintk((MYIOC_s_INFO_FMT "fw size 0x%x, ioc FW Ptr %p\n", + ioc->name, ioc->facts.FWImageSize, ioc->FWImage)); + if (ioc->alt_ioc) + dprintk((MYIOC_s_INFO_FMT "alt ioc FW Ptr %p\n", + ioc->name, ioc->alt_ioc->FWImage)); + + /* Get dma_addr and data transfer size. + */ + if ((fw_sz = ioc->facts.FWImageSize) == 0) + return -1; + + /* Get the DMA from ioc or ioc->alt_ioc */ + if (ioc->FWImage) + FwHdr = (MpiFwHeader_t *)ioc->FWImage; + else if (ioc->alt_ioc && ioc->alt_ioc->FWImage) + FwHdr = (MpiFwHeader_t *)ioc->alt_ioc->FWImage; + + dprintk((MYIOC_s_INFO_FMT "DbGb2: FW Image @ %p\n", + ioc->name, FwHdr)); + + if (!FwHdr) + return -2; + + /* Write magic sequence to WriteSequence register + * until enter diagnostic mode + */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + while ((diag0val & MPI_DIAG_DRWE) == 0) { + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); + + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + + count++; + if (count 
> 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + return -EFAULT; + + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n", + ioc->name, diag0val)); + } + + /* Set the DiagRwEn and Disable ARM bits */ + diag0val |= (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + /* Write the LoadStartAddress to the DiagRw Address Register + * using Programmed IO + */ + + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, FwHdr->LoadStartAddress); + dprintk((MYIOC_s_INFO_FMT "LoadStart addr written 0x%x \n", + ioc->name, FwHdr->LoadStartAddress)); + + nextImage = FwHdr->NextImageHeaderOffset; + + /* round up count to a 32bit alignment */ + ptru32 = (u32 *) FwHdr; + count = (FwHdr->ImageSize + 3)/4; + + dprintk((MYIOC_s_INFO_FMT "Write FW Image: 0x%x u32's @ %p\n", + ioc->name, count, ptru32)); + while (count-- ) { + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptru32); + ptru32++; + } + + dprintk((MYIOC_s_INFO_FMT "FW Image done! 
\n", ioc->name)); + + while (nextImage) { + + /* Set the pointer to the extended image + */ + ExtHdr = (MpiExtImageHeader_t *) ((char *) FwHdr + nextImage); + + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, ExtHdr->LoadStartAddress); + + count = (ExtHdr->ImageSize + 3 )/4; + + ptru32 = (u32 *) ExtHdr; + dprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x u32's @ %p\n", + ioc->name, count, ptru32)); + while (count-- ) { + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptru32); + ptru32++; + } + nextImage = ExtHdr->NextImageHeaderOffset; + } + + + /* Write the IopResetVectorRegAddr */ + dprintk((MYIOC_s_INFO_FMT "Write IopResetVector Addr! \n", ioc->name)); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, FwHdr->IopResetRegAddr); + + /* Write the IopResetVectorValue */ + dprintk((MYIOC_s_INFO_FMT "Write IopResetVector Value! \n", ioc->name)); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, FwHdr->IopResetVectorValue); + + /* Clear the internal flash bad bit - autoincrementing register, + * so must do two writes. + */ + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); + diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); + diagRwData |= 0x4000000; + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); + + /* clear the RW enable and DISARM bits */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + diag0val &= ~(MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE | MPI_DIAG_FLASH_BAD_SIG); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + + /* Write 0xFF to reset the sequencer */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * KickStart - Perform hard reset of MPT adapter. 
* @ioc: Pointer to MPT_ADAPTER structure * @force: Force hard reset + * @sleepFlag: Specifies whether the process can sleep * * This routine places MPT adapter in diagnostic mode via the * WriteSequence register, and then performs a hard reset of adapter * via the Diagnostic register. * - * Returns 0 for soft reset success, 1 for hard reset success, - * else a negative value for failure. + * Inputs: sleepflag - CAN_SLEEP (non-interrupt thread) + * or NO_SLEEP (interrupt thread, use mdelay) + * force - 1 if doorbell active, board fault state + * board operational, IOC_RECOVERY or + * IOC_BRINGUP and there is an alt_ioc. + * 0 else + * + * Returns: + * 1 - hard reset, READY + * 0 - no reset due to History bit, READY + * -1 - no reset due to History bit but not READY + * OR reset but failed to come READY + * -2 - no reset, could not enter DIAG mode + * -3 - reset but bad FW bit */ static int -KickStart(MPT_ADAPTER *ioc, int force) +KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag) { int hard_reset_done = 0; u32 ioc_state; @@ -2056,183 +2742,295 @@ dprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name)); - hard_reset_done = mpt_fc9x9_reset(ioc, force); -#if 0 - if (ioc->chip_type == FC909 || ioc->chip-type == FC919) { - hard_reset_done = mpt_fc9x9_reset(ioc, force); - } else if (ioc->chip_type == FC929) { - unsigned long delta; - - delta = jiffies - ioc->last_kickstart; - dprintk((KERN_INFO MYNAM ": %s: 929 KickStart, last=%ld, delta = %ld\n", - ioc->name, ioc->last_kickstart, delta)); - if ((ioc->sod_reset == 0) || (delta >= 10*HZ)) - hard_reset_done = mpt_fc9x9_reset(ioc, ignore); - else { - dprintk((KERN_INFO MYNAM ": %s: Skipping KickStart (delta=%ld)!\n", - ioc->name, delta)); - return 0; - } - /* TODO! Add C1030! 
- } else if (ioc->chip_type == C1030) { - */ - } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Bad chip_type (0x%x)\n", - ioc->name, ioc->chip_type); - return -5; - } -#endif - + hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag); if (hard_reset_done < 0) return hard_reset_done; - dprintk((KERN_INFO MYNAM ": %s: Diagnostic reset successful\n", + dprintk((MYIOC_s_INFO_FMT "Diagnostic reset successful!\n", ioc->name)); for (cnt=0; cntname, cnt)); return hard_reset_done; } - /* udelay(10000) ? */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (10); + } } - printk(KERN_ERR MYNAM ": %s: ERROR - Failed to come READY after reset!\n", + printk(MYIOC_s_ERR_FMT "Failed to come READY after reset!\n", ioc->name); return -1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * mpt_fc9x9_reset - Perform hard reset of FC9x9 adapter. + * mpt_diag_reset - Perform hard reset of the adapter. * @ioc: Pointer to MPT_ADAPTER structure - * - * This routine places FC9x9 adapter in diagnostic mode via the - * WriteSequence register, and then performs a hard reset of adapter - * via the Diagnostic register. - * - * Returns 0 for success, non-zero for failure. + * @ignore: Set if to honor and clear to ignore + * the reset history bit + * @sleepflag: CAN_SLEEP if called in a non-interrupt thread, + * else set to NO_SLEEP (use mdelay instead) + * + * This routine places the adapter in diagnostic mode via the + * WriteSequence register and then performs a hard reset of adapter + * via the Diagnostic register. Adapter should be in ready state + * upon successful completion. 
+ * + * Returns: 1 hard reset successful + * 0 no reset performed because reset history bit set + * -2 enabling diagnostic mode failed + * -3 diagnostic reset failed */ static int -mpt_fc9x9_reset(MPT_ADAPTER *ioc, int ignore) +mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) { u32 diag0val; + u32 doorbell; int hard_reset_done = 0; + int count = 0; +#ifdef MPT_DEBUG + u32 diag1val = 0; +#endif - /* Use "Diagnostic reset" method! (only thing available!) */ + /* Clear any existing interrupts */ + CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); + /* Use "Diagnostic reset" method! (only thing available!) */ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; if (ioc->alt_ioc) diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DBG1: diag0=%08x, diag1=%08x\n", + dprintk((MYIOC_s_INFO_FMT "DbG1: diag0=%08x, diag1=%08x\n", ioc->name, diag0val, diag1val)); -} #endif - if (diag0val & MPI_DIAG_DRWE) { - dprintk((KERN_INFO MYNAM ": %s: DiagWriteEn bit already set\n", - ioc->name)); - } else { - /* Write magic sequence to WriteSequence register */ - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); - dprintk((KERN_INFO MYNAM ": %s: Wrote magic DiagWriteEn sequence [spot#1]\n", - ioc->name)); - } - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + /* Do the reset if we are told to ignore the reset history + * or if the reset history is 0 + */ + if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) { + while ((diag0val & MPI_DIAG_DRWE) == 0) { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 
MPI_WRSEQ_1ST_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); + + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + + count++; + if (count > 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + return -2; + + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + + dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n", + ioc->name, diag0val)); + } + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG2: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); -} + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbG2: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); #endif - if (!ignore && (diag0val & MPI_DIAG_RESET_HISTORY)) { - dprintk((KERN_INFO MYNAM ": %s: Skipping due to ResetHistory bit set!\n", - ioc->name)); - } else { + /* Write the PreventIocBoot bit */ + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + diag0val |= MPI_DIAG_PREVENT_IOC_BOOT; + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + } + + /* + * Disable the ARM (Bug fix) + * + */ + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM); + mdelay (1); + /* * Now hit the reset bit in the Diagnostic register - * (THE BIG HAMMER!) + * (THE BIG HAMMER!) (Clears DRWE bit). 
*/ - CHIPREG_WRITE32(&ioc->chip->Diagnostic, MPI_DIAG_RESET_ADAPTER); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER); hard_reset_done = 1; - dprintk((KERN_INFO MYNAM ": %s: Diagnostic reset performed\n", + dprintk((MYIOC_s_INFO_FMT "Diagnostic reset performed\n", ioc->name)); - /* want udelay(100) */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + /* + * Call each currently registered protocol IOC reset handler + * with pre-reset indication. + * NOTE: If we're doing _IOC_BRINGUP, there can be no + * MptResetHandlers[] registered yet. + */ + { + int ii; + int r = 0; + + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptResetHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n", + ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET); + if (ioc->alt_ioc) { + dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n", + ioc->name, ioc->alt_ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); + } + } + } + /* FIXME? Examine results here? */ + } + + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + /* If the DownloadBoot operation fails, the + * IOC will be left unusable. This is a fatal error + * case. 
_diag_reset will return < 0 + */ + for (count = 0; count < 30; count ++) { + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT + "DbG2b: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { + break; + } - /* Write magic sequence to WriteSequence register */ + /* wait 1 sec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } else { + mdelay (1000); + } + } + if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) { + printk(KERN_WARNING MYNAM + ": firmware downloadboot failure (%d)!\n", count); + } + + } else { + /* Wait for FW to reload and for board + * to go to the READY state. + * Maximum wait is 30 seconds. + * If fail, no error will check again + * with calling program. + */ + for (count = 0; count < 30; count ++) { + doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); + doorbell &= MPI_IOC_STATE_MASK; + + if (doorbell == MPI_IOC_STATE_READY) { + break; + } + + /* wait 1 sec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } else { + mdelay (1000); + } + } + } + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbG3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + /* Clear RESET_HISTORY bit! Place board in the + * diagnostic mode to update the diag register. 
+ */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + count = 0; + while ((diag0val & MPI_DIAG_DRWE) == 0) { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); - dprintk((KERN_INFO MYNAM ": %s: Wrote magic DiagWriteEn sequence [spot#2]\n", - ioc->name)); - } - /* Clear RESET_HISTORY bit! */ - CHIPREG_WRITE32(&ioc->chip->Diagnostic, 0x0); + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + count++; + if (count > 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + break; + } + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + } + diag0val &= ~MPI_DIAG_RESET_HISTORY; + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -#ifdef MPT_DEBUG -{ - u32 diag1val = 0; - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG3: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); -} -#endif if (diag0val & MPI_DIAG_RESET_HISTORY) { - printk(KERN_WARNING MYNAM ": %s: WARNING - ResetHistory bit failed to clear!\n", + printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n", ioc->name); } + /* Disable Diagnostic Mode + */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF); + + /* Check FW reload status flags. + */ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) { + printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! 
(%02xh)\n", + ioc->name, diag0val); + return -3; + } + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; if (ioc->alt_ioc) diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG4: diag0=%08x, diag1=%08x\n", + dprintk((MYIOC_s_INFO_FMT "DbG4: diag0=%08x, diag1=%08x\n", ioc->name, diag0val, diag1val)); -} #endif - if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) { - printk(KERN_ERR MYNAM ": %s: ERROR - Diagnostic reset FAILED! (%02xh)\n", - ioc->name, diag0val); - return -3; - } /* * Reset flag that says we've enabled event notification */ ioc->facts.EventState = 0; - /* NEW! 20010220 -sralston - * Try to avoid redundant resets of the 929. - */ - ioc->sod_reset++; - ioc->last_kickstart = jiffies; - if (ioc->alt_ioc) { - ioc->alt_ioc->sod_reset = ioc->sod_reset; - ioc->alt_ioc->last_kickstart = ioc->last_kickstart; - } + if (ioc->alt_ioc) + ioc->alt_ioc->facts.EventState = 0; return hard_reset_done; } @@ -2249,16 +3047,45 @@ * Returns 0 for success, non-zero for failure. */ static int -SendIocReset(MPT_ADAPTER *ioc, u8 reset_type) +SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) { int r; + u32 state; + int cntdn, count; dprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n", ioc->name, reset_type)); CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<name, (count+5)/HZ); + return -ETIME; + } + + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (1); /* 1 msec delay */ + } + } + /* TODO! * Cleanup all event stuff for this IOC; re-issue EventNotification * request if needed. @@ -2275,7 +3102,8 @@ * @ioc: Pointer to MPT_ADAPTER structure * * This routine allocates memory for the MPT reply and request frame - * pools, and primes the IOC reply FIFO with reply frames. + * pools (if necessary), and primes the IOC reply FIFO with + * reply frames. * * Returns 0 for success, non-zero for failure. 
*/ @@ -2284,6 +3112,7 @@ { MPT_FRAME_HDR *mf; unsigned long b; + unsigned long flags; dma_addr_t aligned_mem_dma; u8 *mem, *aligned_mem; int i, sz; @@ -2299,8 +3128,8 @@ memset(mem, 0, sz); ioc->alloc_total += sz; ioc->reply_alloc = mem; - dprintk((KERN_INFO MYNAM ": %s.reply_alloc @ %p[%08x], sz=%d bytes\n", - ioc->name, mem, ioc->reply_alloc_dma, sz)); + dprintk((KERN_INFO MYNAM ": %s.reply_alloc @ %p[%p], sz=%d bytes\n", + ioc->name, mem, (void *)(ulong)ioc->reply_alloc_dma, sz)); b = (unsigned long) mem; b = (b + (0x80UL - 1UL)) & ~(0x80UL - 1UL); /* round up to 128-byte boundary */ @@ -2308,15 +3137,20 @@ ioc->reply_frames = (MPT_FRAME_HDR *) aligned_mem; ioc->reply_frames_dma = (ioc->reply_alloc_dma + (aligned_mem - mem)); - aligned_mem_dma = ioc->reply_frames_dma; - dprintk((KERN_INFO MYNAM ": %s.reply_frames @ %p[%08x]\n", - ioc->name, aligned_mem, aligned_mem_dma)); - - for (i = 0; i < ioc->reply_depth; i++) { - /* Write each address to the IOC! */ - CHIPREG_WRITE32(&ioc->chip->ReplyFifo, aligned_mem_dma); - aligned_mem_dma += ioc->reply_sz; - } + + ioc->reply_frames_low_dma = (u32) (ioc->reply_frames_dma & 0xFFFFFFFF); + } + + /* Post Reply frames to FIFO + */ + aligned_mem_dma = ioc->reply_frames_dma; + dprintk((KERN_INFO MYNAM ": %s.reply_frames @ %p[%p]\n", + ioc->name, ioc->reply_frames, (void *)(ulong)aligned_mem_dma)); + + for (i = 0; i < ioc->reply_depth; i++) { + /* Write each address to the IOC! 
*/ + CHIPREG_WRITE32(&ioc->chip->ReplyFifo, aligned_mem_dma); + aligned_mem_dma += ioc->reply_sz; } @@ -2336,8 +3170,8 @@ memset(mem, 0, sz); ioc->alloc_total += sz; ioc->req_alloc = mem; - dprintk((KERN_INFO MYNAM ": %s.req_alloc @ %p[%08x], sz=%d bytes\n", - ioc->name, mem, ioc->req_alloc_dma, sz)); + dprintk((KERN_INFO MYNAM ": %s.req_alloc @ %p[%p], sz=%d bytes\n", + ioc->name, mem, (void *)(ulong)ioc->req_alloc_dma, sz)); b = (unsigned long) mem; b = (b + (0x80UL - 1UL)) & ~(0x80UL - 1UL); /* round up to 128-byte boundary */ @@ -2345,18 +3179,18 @@ ioc->req_frames = (MPT_FRAME_HDR *) aligned_mem; ioc->req_frames_dma = (ioc->req_alloc_dma + (aligned_mem - mem)); - aligned_mem_dma = ioc->req_frames_dma; - dprintk((KERN_INFO MYNAM ": %s.req_frames @ %p[%08x]\n", - ioc->name, aligned_mem, aligned_mem_dma)); + ioc->req_frames_low_dma = (u32) (ioc->req_frames_dma & 0xFFFFFFFF); - for (i = 0; i < ioc->req_depth; i++) { - mf = (MPT_FRAME_HDR *) aligned_mem; - - /* Queue REQUESTs *internally*! */ - Q_ADD_TAIL(&ioc->FreeQ.head, &mf->u.frame.linkage, MPT_FRAME_HDR); - aligned_mem += ioc->req_sz; +#ifdef __ia64__ + /* Check: upper 32-bits of the request and reply frame + * physical addresses must be the same. 
+ * ia64 check only + */ + if ((ioc->req_frames_dma >> 32) != (ioc->reply_frames_dma >> 32)){ + goto out_fail; } +#endif #if defined(CONFIG_MTRR) && 0 /* @@ -2367,20 +3201,38 @@ ioc->mtrr_reg = mtrr_add(ioc->req_alloc_dma, sz, MTRR_TYPE_WRCOMB, 1); - dprintk((KERN_INFO MYNAM ": %s: MTRR region registered (base:size=%08x:%x)\n", - ioc->name, ioc->req_alloc_dma, - sz )); + dprintk((MYIOC_s_INFO_FMT "MTRR region registered (base:size=%08x:%x)\n", + ioc->name, ioc->req_alloc_dma, sz)); #endif + } + /* Initialize Request frames linked list + */ + aligned_mem_dma = ioc->req_frames_dma; + aligned_mem = (u8 *) ioc->req_frames; + dprintk((KERN_INFO MYNAM ": %s.req_frames @ %p[%p]\n", + ioc->name, aligned_mem, (void *)(ulong)aligned_mem_dma)); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_INIT(&ioc->FreeQ, MPT_FRAME_HDR); + for (i = 0; i < ioc->req_depth; i++) { + mf = (MPT_FRAME_HDR *) aligned_mem; + + /* Queue REQUESTs *internally*! */ + Q_ADD_TAIL(&ioc->FreeQ.head, &mf->u.frame.linkage, MPT_FRAME_HDR); + aligned_mem += ioc->req_sz; } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + if (ioc->sense_buf_pool == NULL) { - sz = (ioc->req_depth * 256); + sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); ioc->sense_buf_pool = pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma); if (ioc->sense_buf_pool == NULL) goto out_fail; + ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF); ioc->alloc_total += sz; } @@ -2408,7 +3260,7 @@ #if defined(CONFIG_MTRR) && 0 if (ioc->mtrr_reg > 0) { mtrr_del(ioc->mtrr_reg, 0, 0); - dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", + dprintk((MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name)); } #endif @@ -2417,7 +3269,7 @@ ioc->alloc_total -= sz; } if (ioc->sense_buf_pool != NULL) { - sz = (ioc->req_depth * 256); + sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); pci_free_consistent(ioc->pcidev, sz, ioc->sense_buf_pool, ioc->sense_buf_pool_dma); @@ -2427,8 +3279,8 @@ } 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* - * HandShakeReqAndReply - Send MPT request to and receive reply from +/** + * mpt_handshake_req_reply_wait - Send MPT request to and receive reply from * IOC via doorbell handshake method. * @ioc: Pointer to MPT_ADAPTER structure * @reqBytes: Size of the request in bytes @@ -2436,6 +3288,7 @@ * @replyBytes: Expected size of the reply in bytes * @u16reply: Pointer to area where reply should be written * @maxwait: Max wait time for a reply (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * NOTES: It is the callers responsibility to byte-swap fields in the * request which are greater than 1 byte in size. It is also the @@ -2444,8 +3297,9 @@ * * Returns 0 for success, non-zero for failure. */ -static int -HandShakeReqAndReply(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait) +int +mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, + int replyBytes, u16 *u16reply, int maxwait, int sleepFlag) { MPIDefaultReply_t *mptReply; int failcnt = 0; @@ -2471,57 +3325,61 @@ /* * Wait for IOC's doorbell handshake int */ - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) failcnt++; - dhsprintk((KERN_INFO MYNAM ": %s: HandShake request start, WaitCnt=%d%s\n", + dhsprintk((MYIOC_s_INFO_FMT "HandShake request start, WaitCnt=%d%s\n", ioc->name, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); + /* Read doorbell and check for active bit */ + if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE)) + return -1; + /* * Clear doorbell int (WRITE 0 to IntStatus reg), * then wait for IOC to ACKnowledge that it's ready for * our handshake request. 
*/ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); - if (!failcnt && (t = WaitForDoorbellAck(ioc, 2)) < 0) + if (!failcnt && (t = WaitForDoorbellAck(ioc, 2, sleepFlag)) < 0) failcnt++; if (!failcnt) { - int i; + int ii; u8 *req_as_bytes = (u8 *) req; /* * Stuff request words via doorbell handshake, * with ACK from IOC for each. */ - for (i = 0; !failcnt && i < reqBytes/4; i++) { - u32 word = ((req_as_bytes[(i*4) + 0] << 0) | - (req_as_bytes[(i*4) + 1] << 8) | - (req_as_bytes[(i*4) + 2] << 16) | - (req_as_bytes[(i*4) + 3] << 24)); + for (ii = 0; !failcnt && ii < reqBytes/4; ii++) { + u32 word = ((req_as_bytes[(ii*4) + 0] << 0) | + (req_as_bytes[(ii*4) + 1] << 8) | + (req_as_bytes[(ii*4) + 2] << 16) | + (req_as_bytes[(ii*4) + 3] << 24)); CHIPREG_WRITE32(&ioc->chip->Doorbell, word); - if ((t = WaitForDoorbellAck(ioc, 2)) < 0) + if ((t = WaitForDoorbellAck(ioc, 2, sleepFlag)) < 0) failcnt++; } dmfprintk((KERN_INFO MYNAM ": Handshake request frame (@%p) header\n", req)); DBG_DUMP_REQUEST_FRAME_HDR(req) - dhsprintk((KERN_INFO MYNAM ": %s: HandShake request post done, WaitCnt=%d%s\n", + dhsprintk((MYIOC_s_INFO_FMT "HandShake request post done, WaitCnt=%d%s\n", ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : "")); /* * Wait for completion of doorbell handshake reply from the IOC */ - if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait)) < 0) + if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0) failcnt++; /* * Copy out the cached reply... */ - for(i=0; i < MIN(replyBytes/2,mptReply->MsgLength*2); i++) - u16reply[i] = ioc->hs_reply[i]; + for (ii=0; ii < MIN(replyBytes/2,mptReply->MsgLength*2); ii++) + u16reply[ii] = ioc->hs_reply[ii]; } else { return -99; } @@ -2535,6 +3393,7 @@ * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine waits (up to ~2 seconds max) for IOC doorbell * handshake ACKnowledge. 
@@ -2542,28 +3401,40 @@ * Returns a negative value on failure, else wait loop count. */ static int -WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int cntdn = HZ * howlong; int count = 0; u32 intstat; - while (--cntdn) { - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) - break; - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - count++; + if (sleepFlag == CAN_SLEEP) { + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + count++; + } + } else { + cntdn *= 10; /* convert to msec */ + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) + break; + mdelay (1); + count++; + } + count /= 10; } if (cntdn) { - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell ACK (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell ACK (cnt=%d)\n", ioc->name, count)); return count; } - printk(KERN_ERR MYNAM ": %s: ERROR - Doorbell ACK timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout(%d)!\n", ioc->name, (count+5)/HZ); return -1; } @@ -2574,34 +3445,47 @@ * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt. * * Returns a negative value on failure, else wait loop count. 
*/ static int -WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int cntdn = HZ * howlong; int count = 0; u32 intstat; - while (--cntdn) { - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (intstat & MPI_HIS_DOORBELL_INTERRUPT) - break; - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - count++; + if (sleepFlag == CAN_SLEEP) { + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (intstat & MPI_HIS_DOORBELL_INTERRUPT) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + count++; + } + } else { + cntdn *= 10; /* convert to msec */ + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (intstat & MPI_HIS_DOORBELL_INTERRUPT) + break; + mdelay(1); + count++; + } + count /= 10; } if (cntdn) { - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell INT (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell INT (cnt=%d)\n", ioc->name, count)); return count; } - printk(KERN_ERR MYNAM ": %s: ERROR - Doorbell INT timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Doorbell INT timeout(%d)!\n", ioc->name, (count+5)/HZ); return -1; } @@ -2611,6 +3495,7 @@ * WaitForDoorbellReply - Wait for and capture a IOC handshake reply. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine polls the IOC for a handshake reply, 16 bits at a time. * Reply is cached to IOC private area large enough to hold a maximum @@ -2619,13 +3504,13 @@ * Returns a negative value on failure, else size of reply in WORDS. 
*/ static int -WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int u16cnt = 0; int failcnt = 0; int t; u16 *hs_reply = ioc->hs_reply; - volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply; + volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply; u16 hword; hs_reply[0] = hs_reply[1] = hs_reply[7] = 0; @@ -2634,12 +3519,12 @@ * Get first two u16's so we can look at IOC's intended reply MsgLength */ u16cnt=0; - if ((t = WaitForDoorbellInt(ioc, howlong)) < 0) { + if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) { failcnt++; } else { hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; else { hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); @@ -2647,7 +3532,7 @@ } } - dhsprintk((KERN_INFO MYNAM ": %s: First handshake reply word=%08x%s\n", + dhsprintk((MYIOC_s_INFO_FMT "First handshake reply word=%08x%s\n", ioc->name, le32_to_cpu(*(u32 *)hs_reply), failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); @@ -2656,7 +3541,7 @@ * reply 16 bits at a time. */ for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) { - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); /* don't overflow our IOC hs_reply[] buffer! 
*/ @@ -2665,12 +3550,12 @@ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); } - if (!failcnt && (t = WaitForDoorbellInt(ioc, 2)) < 0) + if (!failcnt && (t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); if (failcnt) { - printk(KERN_ERR MYNAM ": %s: ERROR - Handshake reply failure!\n", + printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n", ioc->name); return -failcnt; } @@ -2683,10 +3568,10 @@ } #endif - dmfprintk((KERN_INFO MYNAM ": %s: Got Handshake reply:\n", ioc->name)); + dmfprintk((MYIOC_s_INFO_FMT "Got Handshake reply:\n", ioc->name)); DBG_DUMP_REPLY_FRAME(mptReply) - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell REPLY (sz=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell REPLY (sz=%d)\n", ioc->name, u16cnt/2)); return u16cnt/2; } @@ -2696,115 +3581,616 @@ * GetLanConfigPages - Fetch LANConfig pages. * @ioc: Pointer to MPT_ADAPTER structure * - * Returns 0 for success, non-zero for failure. + * Return: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) */ static int GetLanConfigPages(MPT_ADAPTER *ioc) { - Config_t config_req; - ConfigReply_t config_reply; - LANPage0_t *page0; + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + LANPage0_t *ppage0_alloc; dma_addr_t page0_dma; - LANPage1_t *page1; + LANPage1_t *ppage1_alloc; dma_addr_t page1_dma; - int i; - int req_sz; - int reply_sz; + int rc = 0; int data_sz; + int copy_sz; -/* LANPage0 */ - /* Immediate destination (reply area)... */ - reply_sz = sizeof(config_reply); - memset(&config_reply, 0, reply_sz); - - /* Ultimate destination... */ - page0 = &ioc->lan_cnfg_page0; - data_sz = sizeof(*page0); - memset(page0, 0, data_sz); - - /* Request area (config_req on the stack right now!) 
*/ - req_sz = sizeof(config_req); - memset(&config_req, 0, req_sz); - config_req.Function = MPI_FUNCTION_CONFIG; - config_req.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - /* config_req.Header.PageVersion = 0; */ - /* config_req.Header.PageLength = 0; */ - config_req.Header.PageNumber = 0; - config_req.Header.PageType = MPI_CONFIG_PAGETYPE_LAN; - /* config_req.PageAddress = 0; */ - config_req.PageBufferSGE.u.Simple.FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_END_OF_LIST | - MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_32_BIT_ADDRESSING | - MPI_SGE_FLAGS_32_BIT_CONTEXT) << MPI_SGE_FLAGS_SHIFT) | - (u32)data_sz - ); - page0_dma = pci_map_single(ioc->pcidev, page0, data_sz, PCI_DMA_FROMDEVICE); - config_req.PageBufferSGE.u.Simple.u.Address32 = cpu_to_le32(page0_dma); + /* Get LAN Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength > 0) { + data_sz = hdr.PageLength * 4; + ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); + rc = -ENOMEM; + if (ppage0_alloc) { + memset((u8 *)ppage0_alloc, 0, data_sz); + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + copy_sz = MIN(sizeof(LANPage0_t), data_sz); + memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz); - dprintk((KERN_INFO MYNAM ": %s: Sending Config request LAN_PAGE_0\n", - ioc->name)); + } - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&config_req, - reply_sz, (u16*)&config_reply, 3); - pci_unmap_single(ioc->pcidev, page0_dma, data_sz, PCI_DMA_FROMDEVICE); - if (i != 0) - return i; + 
pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); - /* Now byte swap the necessary LANPage0 fields */ + /* FIXME! + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ -/* LANPage1 */ - /* Immediate destination (reply area)... */ - reply_sz = sizeof(config_reply); - memset(&config_reply, 0, reply_sz); - - /* Ultimate destination... */ - page1 = &ioc->lan_cnfg_page1; - data_sz = sizeof(*page1); - memset(page1, 0, data_sz); - - /* Request area (config_req on the stack right now!) */ - req_sz = sizeof(config_req); - memset(&config_req, 0, req_sz); - config_req.Function = MPI_FUNCTION_CONFIG; - config_req.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - /* config_req.Header.PageVersion = 0; */ - /* config_req.Header.PageLength = 0; */ - config_req.Header.PageNumber = 1; - config_req.Header.PageType = MPI_CONFIG_PAGETYPE_LAN; - /* config_req.PageAddress = 0; */ - config_req.PageBufferSGE.u.Simple.FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_END_OF_LIST | - MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_32_BIT_ADDRESSING | - MPI_SGE_FLAGS_32_BIT_CONTEXT) << MPI_SGE_FLAGS_SHIFT) | - (u32)data_sz - ); - page1_dma = pci_map_single(ioc->pcidev, page1, data_sz, PCI_DMA_FROMDEVICE); - config_req.PageBufferSGE.u.Simple.u.Address32 = cpu_to_le32(page1_dma); + } - dprintk((KERN_INFO MYNAM ": %s: Sending Config request LAN_PAGE_1\n", - ioc->name)); + if (rc) + return rc; + } - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&config_req, - reply_sz, (u16*)&config_reply, 3); - pci_unmap_single(ioc->pcidev, page1_dma, data_sz, PCI_DMA_FROMDEVICE); - if (i != 0) - return i; + /* Get LAN Page 1 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 1; + hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + + if 
((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma); + if (ppage1_alloc) { + memset((u8 *)ppage1_alloc, 0, data_sz); + cfg.physAddr = page1_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + copy_sz = MIN(sizeof(LANPage1_t), data_sz); + memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz); + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma); + + /* FIXME! + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ + + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * GetFcPortPage0 - Fetch FCPort config Page0. + * @ioc: Pointer to MPT_ADAPTER structure + * @portnum: IOC Port number + * + * Return: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +static int +GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + FCPortPage0_t *ppage0_alloc; + FCPortPage0_t *pp0dest; + dma_addr_t page0_dma; + int data_sz; + int copy_sz; + int rc; + + /* Get FCPort Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = portnum; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); + if (ppage0_alloc) { + memset((u8 *)ppage0_alloc, 0, 
data_sz); + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + pp0dest = &ioc->fc_port_page0[portnum]; + copy_sz = MIN(sizeof(FCPortPage0_t), data_sz); + memcpy(pp0dest, ppage0_alloc, copy_sz); + + /* + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ + pp0dest->Flags = le32_to_cpu(pp0dest->Flags); + pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier); + pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low); + pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High); + pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low); + pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High); + pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass); + pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds); + pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed); + pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize); + pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low); + pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High); + pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low); + pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High); + pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount); + pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators); + + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 
+ * @ioc: Pointer to MPT_ADAPTER structure + * + * Returns: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +static int +GetIoUnitPage2(MPT_ADAPTER *ioc) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + IOUnitPage2_t *ppage_alloc; + dma_addr_t page_dma; + int data_sz; + int rc; + + /* Get the page header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 2; + hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + /* Read the config page */ + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); + if (ppage_alloc) { + memset((u8 *)ppage_alloc, 0, data_sz); + cfg.physAddr = page_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + /* If Good, save data */ + if ((rc = mpt_config(ioc, &cfg)) == 0) + ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion); + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2 + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: -EFAULT if read of config page header fails + * or if no nvram + * If read of SCSI Port Page 0 fails, + * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) + * Adapter settings: async, narrow + * Return 1 + * If read of SCSI Port Page 2 fails, + * Adapter settings valid + * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) + * Return 1 + * Else + * Both valid + * Return 0 + * CHECK - what type of locking mechanisms should 
be used???? + */ +static int +mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum) +{ + u8 *pbuf = NULL; + dma_addr_t buf_dma; + CONFIGPARMS cfg; + ConfigPageHeader_t header; + int ii; + int data, rc = 0; + + /* Allocate memory + */ + if (!ioc->spi_data.nvram) { + int sz; + u8 *mem; + sz = MPT_MAX_SCSI_DEVICES * sizeof(int); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -EFAULT; + + ioc->spi_data.nvram = (int *) mem; + + dprintk((MYIOC_s_INFO_FMT "SCSI device NVRAM settings @ %p, sz=%d\n", + ioc->name, ioc->spi_data.nvram, sz)); + } + + /* Invalidate NVRAM information + */ + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID; + } + + /* Read SPP0 header, allocate memory, then read page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 0; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; /* use default */ + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength > 0) { + pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) != 0) { + ioc->spi_data.maxBusWidth = MPT_NARROW; + ioc->spi_data.maxSyncOffset = 0; + ioc->spi_data.minSyncFactor = MPT_ASYNC; + ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN; + rc = 1; + } else { + /* Save the Port Page 0 data + */ + SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf; + pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities); + pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface); + + ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 
1 : 0; + data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK; + if (data) { + ioc->spi_data.maxSyncOffset = (u8) (data >> 16); + data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK; + ioc->spi_data.minSyncFactor = (u8) (data >> 8); + } else { + ioc->spi_data.maxSyncOffset = 0; + ioc->spi_data.minSyncFactor = MPT_ASYNC; + } + + ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK; + + /* Update the minSyncFactor based on bus type. + */ + if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) || + (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) { + + if (ioc->spi_data.minSyncFactor < MPT_ULTRA) + ioc->spi_data.minSyncFactor = MPT_ULTRA; + } + } + if (pbuf) { + pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + } + + /* SCSI Port Page 2 - Read the header then the page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 2; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength > 0) { + /* Allocate memory and read SCSI Port Page 2 + */ + pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM; + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) != 0) { + /* Nvram data is left with INVALID mark + */ + rc = 1; + } else { + SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf; + MpiDeviceInfo_t *pdevice = NULL; - /* Now byte swap the necessary LANPage1 fields */ + /* Save the Port Page 2 data + * (reformat into a 32bit quantity) + */ + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + pdevice = &pPP2->DeviceSettings[ii]; + data = (le16_to_cpu(pdevice->DeviceFlags) << 16) | + (pdevice->SyncFactor << 8) | pdevice->Timeout; + 
ioc->spi_data.nvram[ii] = data; + } + } + + pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + + /* Update Adapter limits with those from NVRAM + * Comment: Don't need to do this. Target performance + * parameters will never exceed the adapters limits. + */ + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mpt_readScsiDevicePageHeaders - save version and length of SDP1 + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: -EFAULT if read of config page header fails + * or 0 if success. + */ +static int +mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum) +{ + CONFIGPARMS cfg; + ConfigPageHeader_t header; + + /* Read the SCSI Device Page 1 header + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 1; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + ioc->spi_data.sdp1version = cfg.hdr->PageVersion; + ioc->spi_data.sdp1length = cfg.hdr->PageLength; + + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 0; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + ioc->spi_data.sdp0version = cfg.hdr->PageVersion; + ioc->spi_data.sdp0length = cfg.hdr->PageLength; return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** + * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: + * 0 on success + * -EFAULT if read of config page header fails or data pointer not NULL + * -ENOMEM if pci_alloc failed + */ +static int +mpt_findImVolumes(MPT_ADAPTER *ioc) +{ + IOCPage2_t *pIoc2 = NULL; + IOCPage3_t 
*pIoc3 = NULL; + ConfigPageIoc2RaidVol_t *pIocRv = NULL; + u8 *mem; + dma_addr_t ioc2_dma; + dma_addr_t ioc3_dma; + CONFIGPARMS cfg; + ConfigPageHeader_t header; + int jj; + int rc = 0; + int iocpage2sz; + int iocpage3sz = 0; + u8 nVols, nPhys; + u8 vid, vbus, vioc; + + if (ioc->spi_data.pIocPg3) + return -EFAULT; + + /* Read IOCP2 header then the page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 2; + header.PageType = MPI_CONFIG_PAGETYPE_IOC; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength == 0) + return -EFAULT; + + iocpage2sz = header.PageLength * 4; + pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma); + if (!pIoc2) + return -ENOMEM; + + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.physAddr = ioc2_dma; + if (mpt_config(ioc, &cfg) != 0) + goto done_and_free; + + /* Identify RAID Volume Id's */ + nVols = pIoc2->NumActiveVolumes; + if ( nVols == 0) { + /* No RAID Volumes. Done. + */ + } else { + /* At least 1 RAID Volume + */ + pIocRv = pIoc2->RaidVolume; + ioc->spi_data.isRaid = 0; + for (jj = 0; jj < nVols; jj++, pIocRv++) { + vid = pIocRv->VolumeID; + vbus = pIocRv->VolumeBus; + vioc = pIocRv->VolumeIOC; + + /* find the match + */ + if (vbus == 0) { + ioc->spi_data.isRaid |= (1 << vid); + } else { + /* Error! Always bus 0 + */ + } + } + } + + /* Identify Hidden Physical Disk Id's */ + nPhys = pIoc2->NumActivePhysDisks; + if (nPhys == 0) { + /* No physical disks. Done. + */ + } else { + /* There is at least one physical disk. 
+ * Read and save IOC Page 3 + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 3; + header.PageType = MPI_CONFIG_PAGETYPE_IOC; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + goto done_and_free; + + if (header.PageLength == 0) + goto done_and_free; + + /* Read Header good, alloc memory + */ + iocpage3sz = header.PageLength * 4; + pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma); + if (!pIoc3) + goto done_and_free; + + /* Read the Page and save the data + * into malloc'd memory. + */ + cfg.physAddr = ioc3_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + if (mpt_config(ioc, &cfg) == 0) { + mem = kmalloc(iocpage3sz, GFP_KERNEL); + if (mem) { + memcpy(mem, (u8 *)pIoc3, iocpage3sz); + ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem; + } + } + } + +done_and_free: + if (pIoc2) { + pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma); + pIoc2 = NULL; + } + + if (pIoc3) { + pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma); + pIoc3 = NULL; + } + + return rc; +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * SendEventNotification - Send EventNotification (on or off) request * to MPT adapter. 
* @ioc: Pointer to MPT_ADAPTER structure @@ -2817,13 +4203,13 @@ evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc->id); if (evnp == NULL) { - dprintk((KERN_WARNING MYNAM ": %s: WARNING - Unable to allocate a event request frame!\n", + dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", ioc->name)); return 0; } memset(evnp, 0, sizeof(*evnp)); - dprintk((KERN_INFO MYNAM ": %s: Sending EventNotification(%d)\n", ioc->name, EvSwitch)); + dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch)); evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; evnp->ChainOffset = 0; @@ -2847,13 +4233,13 @@ EventAck_t *pAck; if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc->id)) == NULL) { - printk(KERN_WARNING MYNAM ": %s: WARNING - Unable to allocate event ACK request frame!\n", + printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n", ioc->name); return -1; } memset(pAck, 0, sizeof(*pAck)); - dprintk((KERN_INFO MYNAM ": %s: Sending EventAck\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); pAck->Function = MPI_FUNCTION_EVENT_ACK; pAck->ChainOffset = 0; @@ -2866,25 +4252,212 @@ return 0; } -#ifdef CONFIG_PROC_FS /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mpt_config - Generic function to issue config message + * @ioc - Pointer to an adapter structure + * @cfg - Pointer to a configuration structure. Struct contains + * action, page address, direction, physical address + * and pointer to a configuration page header + * Page header is updated. 
+ * + * Returns 0 for success + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +int +mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) +{ + Config_t *pReq; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + unsigned long flags; + int ii, rc; + int flagsLength; + int in_isr; + + /* (Bugzilla:fibrebugs, #513) + * Bug fix (part 1)! 20010905 -sralston + * Prevent calling wait_event() (below), if caller happens + * to be in ISR context, because that is fatal! + */ + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", + ioc->name)); + return -EPERM; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(mpt_base_index, ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", + ioc->name)); + return -EAGAIN; + } + pReq = (Config_t *)mf; + pReq->Action = pCfg->action; + pReq->Reserved = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_CONFIG; + pReq->Reserved1[0] = 0; + pReq->Reserved1[1] = 0; + pReq->Reserved1[2] = 0; + pReq->MsgFlags = 0; + for (ii=0; ii < 8; ii++) + pReq->Reserved2[ii] = 0; + + pReq->Header.PageVersion = pCfg->hdr->PageVersion; + pReq->Header.PageLength = pCfg->hdr->PageLength; + pReq->Header.PageNumber = pCfg->hdr->PageNumber; + pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); + pReq->PageAddress = cpu_to_le32(pCfg->pageAddr); + + /* Add a SGE to the config request. 
+ */ + flagsLength = ((MPI_SGE_FLAGS_LAST_ELEMENT | + MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPT_SGE_ADDRESS_SIZE ) << MPI_SGE_FLAGS_SHIFT) | + pCfg->hdr->PageLength * 4; + + if (pCfg->dir) + flagsLength |= (MPI_SGE_FLAGS_DIRECTION << MPI_SGE_FLAGS_SHIFT); + + psge = (MptSge_t *) &pReq->PageBufferSGE; + psge->FlagsLength = cpu_to_le32(flagsLength); + cpu_to_leXX(pCfg->physAddr, psge->Address); + + dprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n", + ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action)); + + /* Append pCfg pointer to end of mf + */ + *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; + + /* Initalize the timer + */ + init_timer(&pCfg->timer); + pCfg->timer.data = (unsigned long) ioc; + pCfg->timer.function = mpt_timer_expired; + pCfg->wait_done = 0; + + /* Set the timer; ensure 10 second minimum */ + if (pCfg->timeout < 10) + pCfg->timer.expires = jiffies + HZ*10; + else + pCfg->timer.expires = jiffies + HZ*pCfg->timeout; + + /* Add to end of Q, set timer and then issue this command */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_ADD_TAIL(&ioc->configQ.head, &pCfg->linkage, Q_ITEM); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + add_timer(&pCfg->timer); + mpt_put_msg_frame(mpt_base_index, ioc->id, mf); + wait_event(mpt_waitq, pCfg->wait_done); + + /* mf has been freed - do not access */ + + rc = pCfg->status; + + return rc; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... + * mpt_timer_expired - Call back for timer process. + * Used only internal config functionality. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + */ +static void +mpt_timer_expired(unsigned long data) +{ + MPT_ADAPTER *ioc = (MPT_ADAPTER *) data; + + dprintk((MYIOC_s_WARN_FMT "mpt_timer_expired! 
\n", ioc->name)); + + /* Perform a FW reload */ + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); + + /* No more processing. + * Hard reset clean-up will wake up + * process and free all resources. + */ + dprintk((MYIOC_s_WARN_FMT "mpt_timer_expired complete!\n", ioc->name)); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mpt_ioc_reset - Base cleanup for hard reset + * @ioc: Pointer to the adapter structure + * @reset_phase: Indicates pre- or post-reset functionality + * + * Remark: Free's resources with internally generated commands. */ +static int +mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + CONFIGPARMS *pCfg; + unsigned long flags; -#define PROC_MPT_READ_RETURN(page,start,off,count,eof,len) \ -{ \ - len -= off; \ - if (len < count) { \ - *eof = 1; \ - if (len <= 0) \ - return 0; \ - } else \ - len = count; \ - *start = page + off; \ - return len; \ + dprintk((KERN_WARNING MYNAM + ": IOC %s_reset routed to MPT base driver!\n", + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + + if (reset_phase == MPT_IOC_PRE_RESET) { + /* If the internal config Q is not empty - + * delete timer. MF resources will be freed when + * the FIFO's are primed. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&ioc->configQ)){ + pCfg = (CONFIGPARMS *)ioc->configQ.head; + do { + del_timer(&pCfg->timer); + pCfg = (CONFIGPARMS *) (pCfg->linkage.forw); + } while (pCfg != (CONFIGPARMS *)&ioc->configQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + } else { + CONFIGPARMS *pNext; + + /* Search the configQ for internal commands. + * Flush the Q, and wake up all suspended threads. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! 
Q_IS_EMPTY(&ioc->configQ)){ + pCfg = (CONFIGPARMS *)ioc->configQ.head; + do { + pNext = (CONFIGPARMS *) pCfg->linkage.forw; + + Q_DEL_ITEM(&pCfg->linkage); + + pCfg->status = MPT_CONFIG_ERROR; + pCfg->wait_done = 1; + wake_up(&mpt_waitq); + + pCfg = pNext; + } while (pCfg != (CONFIGPARMS *)&ioc->configQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + } + + return 1; /* currently means nothing really */ } + +#ifdef CONFIG_PROC_FS /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... + */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries. @@ -2894,71 +4467,62 @@ static int procmpt_create(void) { - MPT_ADAPTER *ioc; - struct proc_dir_entry *ent; - int errcnt = 0; + MPT_ADAPTER *ioc; + struct proc_dir_entry *ent; + int ii; /* - * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" - * (single level) to multi level (e.g. "driver/message/fusion") - * something here needs to change. -sralston + * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" + * (single level) to multi level (e.g. "driver/message/fusion") + * something here needs to change. 
-sralston */ - procmpt_root_dir = CREATE_PROCDIR_ENTRY(MPT_PROCFS_MPTBASEDIR, NULL); - if (procmpt_root_dir == NULL) + mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL); + if (mpt_proc_root_dir == NULL) return -ENOTDIR; - if ((ioc = mpt_adapter_find_first()) != NULL) { - ent = create_proc_read_entry(MPT_PROCFS_SUMMARY_NODE, 0, NULL, procmpt_read_summary, NULL); - if (ent == NULL) { - printk(KERN_WARNING MYNAM ": WARNING - Could not create %s entry!\n", - MPT_PROCFS_SUMMARY_PATHNAME); - errcnt++; + for (ii=0; ii < MPT_PROC_ENTRIES; ii++) { + ent = create_proc_entry(mpt_proc_list[ii].name, + S_IFREG|S_IRUGO, mpt_proc_root_dir); + if (!ent) { + printk(KERN_WARNING MYNAM + ": WARNING - Could not create /proc/mpt/%s entry\n", + mpt_proc_list[ii].name); + continue; } + ent->read_proc = mpt_proc_list[ii].f; + ent->data = NULL; } + ioc = mpt_adapter_find_first(); while (ioc != NULL) { - char pname[32]; - int namelen; + struct proc_dir_entry *dent; /* * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter. */ - namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - if ((ent = CREATE_PROCDIR_ENTRY(pname, NULL)) != NULL) { + if ((dent = proc_mkdir(ioc->name, mpt_proc_root_dir)) != NULL) { /* - * And populate it with: "summary" and "dbg" file entries. + * And populate it with mpt_ioc_proc_list[] entries. */ - (void) sprintf(pname+namelen, "/summary"); - ent = create_proc_read_entry(pname, 0, NULL, procmpt_read_summary, ioc); - if (ent == NULL) { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); - } -//#ifdef MPT_DEBUG - /* DEBUG aid! 
*/ - (void) sprintf(pname+namelen, "/dbg"); - ent = create_proc_read_entry(pname, 0, NULL, procmpt_read_dbg, ioc); - if (ent == NULL) { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); + for (ii=0; ii < MPT_IOC_PROC_ENTRIES; ii++) { + ent = create_proc_entry(mpt_ioc_proc_list[ii].name, + S_IFREG|S_IRUGO, dent); + if (!ent) { + printk(KERN_WARNING MYNAM + ": WARNING - Could not create /proc/mpt/%s/%s entry!\n", + ioc->name, + mpt_ioc_proc_list[ii].name); + continue; + } + ent->read_proc = mpt_ioc_proc_list[ii].f; + ent->data = ioc; } -//#endif } else { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); - + printk(MYIOC_s_WARN_FMT "Could not create /proc/mpt/%s subdir entry!\n", + ioc->name, mpt_ioc_proc_list[ii].name); } - ioc = mpt_adapter_find_next(ioc); } - if (errcnt) { -// remove_proc_entry("mpt", 0); - return -ENOTDIR; - } - return 0; } @@ -2971,44 +4535,44 @@ static int procmpt_destroy(void) { - MPT_ADAPTER *ioc; + MPT_ADAPTER *ioc; + int ii; - if (!procmpt_root_dir) + if (!mpt_proc_root_dir) return 0; /* - * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" - * (single level) to multi level (e.g. "driver/message/fusion") - * something here needs to change. -sralston + * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" + * (single level) to multi level (e.g. "driver/message/fusion") + * something here needs to change. -sralston */ ioc = mpt_adapter_find_first(); - if (ioc != NULL) { - remove_proc_entry(MPT_PROCFS_SUMMARY_NODE, 0); - } - while (ioc != NULL) { char pname[32]; int namelen; + + namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); + /* * Tear down each "/proc/mpt/iocN" subdirectory. 
*/ - namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - (void) sprintf(pname+namelen, "/summary"); - remove_proc_entry(pname, 0); -//#ifdef MPT_DEBUG - (void) sprintf(pname+namelen, "/dbg"); - remove_proc_entry(pname, 0); -//#endif - (void) sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - remove_proc_entry(pname, 0); + for (ii=0; ii < MPT_IOC_PROC_ENTRIES; ii++) { + (void) sprintf(pname+namelen, "/%s", mpt_ioc_proc_list[ii].name); + remove_proc_entry(pname, NULL); + } + + remove_proc_entry(ioc->name, mpt_proc_root_dir); ioc = mpt_adapter_find_next(ioc); } - if (atomic_read((atomic_t *)&procmpt_root_dir->count) == 0) { - remove_proc_entry(MPT_PROCFS_MPTBASEDIR, 0); - procmpt_root_dir = NULL; + for (ii=0; ii < MPT_PROC_ENTRIES; ii++) + remove_proc_entry(mpt_proc_list[ii].name, mpt_proc_root_dir); + + if (atomic_read((atomic_t *)&mpt_proc_root_dir->count) == 0) { + remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL); + mpt_proc_root_dir = NULL; return 0; } @@ -3016,23 +4580,23 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * procmpt_read_summary - Handle read request from /proc/mpt/summary +/* + * procmpt_summary_read - Handle read request from /proc/mpt/summary * or from /proc/mpt/iocN/summary. - * @page: Pointer to area to write information + * @buf: Pointer to area to write information * @start: Pointer to start pointer - * @off: Offset to start writing - * @count: + * @offset: Offset to start writing + * @request: * @eof: Pointer to EOF integer - * @data: Pointer + * @data: Pointer * - * Returns numbers of characters written to process performing the read. + * Returns number of characters written to process performing the read. 
*/ static int -procmpt_read_summary(char *page, char **start, off_t off, int count, int *eof, void *data) +procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) { MPT_ADAPTER *ioc; - char *out = page; + char *out = buf; int len; if (data == NULL) @@ -3040,84 +4604,196 @@ else ioc = data; -// Too verbose! -// out += sprintf(out, "Attached Fusion MPT I/O Controllers:%s\n", ioc ? "" : " none"); - while (ioc) { int more = 0; -// Too verbose! -// mpt_print_ioc_facts(ioc, out, &more, 0); mpt_print_ioc_summary(ioc, out, &more, 0, 1); out += more; - if ((out-page) >= count) { + if ((out-buf) >= request) { break; } if (data == NULL) ioc = mpt_adapter_find_next(ioc); else - ioc = NULL; /* force exit for iocN */ + ioc = NULL; /* force exit for iocN */ } - len = out - page; + len = out - buf; - PROC_MPT_READ_RETURN(page,start,off,count,eof,len); + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); } -// debug aid! /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * procmpt_read_dbg - Handle read request from /proc/mpt/iocN/dbg. - * @page: Pointer to area to write information +/* + * procmpt_version_read - Handle read request from /proc/mpt/version. + * @buf: Pointer to area to write information * @start: Pointer to start pointer - * @off: Offset to start writing - * @count: + * @offset: Offset to start writing + * @request: * @eof: Pointer to EOF integer - * @data: Pointer + * @data: Pointer * - * Returns numbers of characters written to process performing the read. + * Returns number of characters written to process performing the read. 
*/ static int -procmpt_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data) +procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - MPT_ADAPTER *ioc; - char *out = page; - int len; + int ii; + int scsi, lan, ctl, targ, dmp; + char *drvname; + int len; + + len = sprintf(buf, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON); + len += sprintf(buf+len, " Fusion MPT base driver\n"); + + scsi = lan = ctl = targ = dmp = 0; + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + drvname = NULL; + if (MptCallbacks[ii]) { + switch (MptDriverClass[ii]) { + case MPTSCSIH_DRIVER: + if (!scsi++) drvname = "SCSI host"; + break; + case MPTLAN_DRIVER: + if (!lan++) drvname = "LAN"; + break; + case MPTSTM_DRIVER: + if (!targ++) drvname = "SCSI target"; + break; + case MPTCTL_DRIVER: + if (!ctl++) drvname = "ioctl"; + break; + case MPTDMP_DRIVER: + if (!dmp++) drvname = "DMP"; + break; + } - ioc = data; + if (drvname) + len += sprintf(buf+len, " Fusion MPT %s driver\n", drvname); + /* + * Handle isense special case, because it + * doesn't do a formal mpt_register call. + */ + if (isense_idx == ii) + len += sprintf(buf+len, " Fusion MPT isense driver\n"); + } else + break; + } - while (ioc) { - int more = 0; + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); +} - mpt_print_ioc_facts(ioc, out, &more, 0); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info. + * @buf: Pointer to area to write information + * @start: Pointer to start pointer + * @offset: Offset to start writing + * @request: + * @eof: Pointer to EOF integer + * @data: Pointer + * + * Returns number of characters written to process performing the read. 
+ */ +static int +procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) +{ + MPT_ADAPTER *ioc = data; + int len; + char expVer[32]; + int sz; + int p; - out += more; - if ((out-page) >= count) { - break; + mpt_get_fw_exp_ver(expVer, ioc); + + len = sprintf(buf, "%s:", ioc->name); + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) + len += sprintf(buf+len, " (f/w download boot flag set)"); +// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL) +// len += sprintf(buf+len, " CONFIG_CHECKSUM_FAIL!"); + + len += sprintf(buf+len, "\n ProductID = 0x%04x (%s)\n", + ioc->facts.ProductID, + ioc->prod_name); + len += sprintf(buf+len, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer); + if (ioc->facts.FWImageSize) + len += sprintf(buf+len, " (fw_size=%d)", ioc->facts.FWImageSize); + len += sprintf(buf+len, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion); + len += sprintf(buf+len, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit); + len += sprintf(buf+len, " EventState = 0x%02x\n", ioc->facts.EventState); + + len += sprintf(buf+len, " CurrentHostMfaHighAddr = 0x%08x\n", + ioc->facts.CurrentHostMfaHighAddr); + len += sprintf(buf+len, " CurrentSenseBufferHighAddr = 0x%08x\n", + ioc->facts.CurrentSenseBufferHighAddr); + + len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + ioc->req_alloc, (void *)(ulong)ioc->req_alloc_dma); + /* + * Rounding UP to nearest 4-kB boundary here... 
+ */ + sz = (ioc->req_sz * ioc->req_depth) + 128; + sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000; + len += sprintf(buf+len, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n", + ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz); + len += sprintf(buf+len, " {MaxReqSz=%d} {MaxReqDepth=%d}\n", + 4*ioc->facts.RequestFrameSize, + ioc->facts.GlobalCredits); + + len += sprintf(buf+len, " ReplyFrames @ 0x%p (Dma @ 0x%p)\n", + ioc->reply_alloc, (void *)(ulong)ioc->reply_alloc_dma); + sz = (ioc->reply_sz * ioc->reply_depth) + 128; + len += sprintf(buf+len, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", + ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); + len += sprintf(buf+len, " {MaxRepSz=%d} {MaxRepDepth=%d}\n", + ioc->facts.CurReplyFrameSize, + ioc->facts.ReplyQueueDepth); + + len += sprintf(buf+len, " MaxDevices = %d\n", + (ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices); + len += sprintf(buf+len, " MaxBuses = %d\n", ioc->facts.MaxBuses); + + /* per-port info */ + for (p=0; p < ioc->facts.NumberOfPorts; p++) { + len += sprintf(buf+len, " PortNumber = %d (of %d)\n", + p+1, + ioc->facts.NumberOfPorts); + if ((int)ioc->chip_type <= (int)FC929) { + if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { + u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; + len += sprintf(buf+len, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + a[5], a[4], a[3], a[2], a[1], a[0]); + } + len += sprintf(buf+len, " WWN = %08X%08X:%08X%08X\n", + ioc->fc_port_page0[p].WWNN.High, + ioc->fc_port_page0[p].WWNN.Low, + ioc->fc_port_page0[p].WWPN.High, + ioc->fc_port_page0[p].WWPN.Low); } - ioc = NULL; } - len = out - page; - PROC_MPT_READ_RETURN(page,start,off,count,eof,len); + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); } + #endif /* CONFIG_PROC_FS } */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc) { - if 
((ioc->facts.FWVersion & 0xF000) == 0xE000) + buf[0] ='\0'; + if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) { sprintf(buf, " (Exp %02d%02d)", - (ioc->facts.FWVersion & 0x0F00) >> 8, /* Month */ - ioc->facts.FWVersion & 0x001F); /* Day */ - else - buf[0] ='\0'; + (ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */ + (ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */ - /* insider hack! */ - if (ioc->facts.FWVersion & 0x0080) { - strcat(buf, " [MDBG]"); + /* insider hack! */ + if ((ioc->facts.FWVersion.Word >> 8) & 0x80) + strcat(buf, " [MDBG]"); } } @@ -3130,8 +4806,8 @@ * @len: Offset at which to start writing in buffer * @showlan: Display LAN stuff? * - * This routine writes (english readable) ASCII text, which represents - * a summary of IOC information, to a buffer. + * This routine writes (english readable) ASCII text, which represents + * a summary of IOC information, to a buffer. */ void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan) @@ -3144,11 +4820,11 @@ /* * Shorter summary of attached ioc's... 
*/ - y = sprintf(buffer+len, "%s: %s, %s%04xh%s, Ports=%d, MaxQ=%d", + y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d", ioc->name, ioc->prod_name, MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */ - ioc->facts.FWVersion, + ioc->facts.FWVersion.Word, expVer, ioc->facts.NumberOfPorts, ioc->req_depth); @@ -3159,8 +4835,11 @@ a[5], a[4], a[3], a[2], a[1], a[0]); } - if (ioc->pci_irq < 100) - y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq); +#ifndef __sparc__ + y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq); +#else + y += sprintf(buffer+len+y, ", IRQ=%s", __irq_itoa(ioc->pci_irq)); +#endif if (!ioc->active) y += sprintf(buffer+len+y, " (disabled)"); @@ -3171,75 +4850,66 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Reset Handling + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_print_ioc_facts - Write ASCII summary of IOC facts to a buffer. + * mpt_HardResetHandler - Generic reset handler, issue SCSI Task + * Management call based on input arg values. If TaskMgmt fails, + * return associated SCSI request. * @ioc: Pointer to MPT_ADAPTER structure - * @buffer: Pointer to buffer where IOC facts should be written - * @size: Pointer to number of bytes we wrote (set by this routine) - * @len: Offset at which to start writing in buffer + * @sleepFlag: Indicates if sleep or schedule must be called. * - * This routine writes (english readable) ASCII text, which represents - * a summary of the IOC facts, to a buffer. + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). + * + * Remark: A return of -1 is a FATAL error case, as it means a + * FW reload/initialization failed. + * + * Returns 0 for SUCCESS or -1 if FAILED. 
*/ -void -mpt_print_ioc_facts(MPT_ADAPTER *ioc, char *buffer, int *size, int len) +int +mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { - char expVer[32]; - char iocName[16]; - int sz; - int y; - int p; - - mpt_get_fw_exp_ver(expVer, ioc); + int rc; + unsigned long flags; - strcpy(iocName, ioc->name); - y = sprintf(buffer+len, "%s:\n", iocName); + dprintk((MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name)); +#ifdef MFCNT + printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name); + printk("MF count 0x%x !\n", ioc->mfcnt); +#endif - y += sprintf(buffer+len+y, " ProductID = 0x%04x\n", ioc->facts.ProductID); - for (p=0; p < ioc->facts.NumberOfPorts; p++) { - y += sprintf(buffer+len+y, " PortNumber = %d (of %d)\n", - p+1, - ioc->facts.NumberOfPorts); - if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { - u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; - y += sprintf(buffer+len+y, " LanAddr = 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - a[5], a[4], a[3], a[2], a[1], a[0]); - } - } - y += sprintf(buffer+len+y, " FWVersion = 0x%04x%s\n", ioc->facts.FWVersion, expVer); - y += sprintf(buffer+len+y, " MsgVersion = 0x%04x\n", ioc->facts.MsgVersion); - y += sprintf(buffer+len+y, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit); - y += sprintf(buffer+len+y, " EventState = 0x%02x\n", ioc->facts.EventState); - y += sprintf(buffer+len+y, " CurrentHostMfaHighAddr = 0x%08x\n", - ioc->facts.CurrentHostMfaHighAddr); - y += sprintf(buffer+len+y, " CurrentSenseBufferHighAddr = 0x%08x\n", - ioc->facts.CurrentSenseBufferHighAddr); - y += sprintf(buffer+len+y, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); - y += sprintf(buffer+len+y, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + /* Reset the adapter. Prevent more than 1 call to + * mpt_do_ioc_recovery at any instant in time. 
+ */ + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ + spin_unlock_irqrestore(&ioc->diagLock, flags); + return 0; + } else { + ioc->diagPending = 1; + } + spin_unlock_irqrestore(&ioc->diagLock, flags); - y += sprintf(buffer+len+y, " RequestFrames @ 0x%p (Dma @ 0x%08x)\n", - ioc->req_alloc, ioc->req_alloc_dma); - /* - * Rounding UP to nearest 4-kB boundary here... + /* FIXME: If do_ioc_recovery fails, repeat.... */ - sz = (ioc->req_sz * ioc->req_depth) + 128; - sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000; - y += sprintf(buffer+len+y, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n", - ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz); - y += sprintf(buffer+len+y, " {MaxReqSz=%d} {MaxReqDepth=%d}\n", - 4*ioc->facts.RequestFrameSize, - ioc->facts.GlobalCredits); - y += sprintf(buffer+len+y, " ReplyFrames @ 0x%p (Dma @ 0x%08x)\n", - ioc->reply_alloc, ioc->reply_alloc_dma); - sz = (ioc->reply_sz * ioc->reply_depth) + 128; - y += sprintf(buffer+len+y, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", - ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); - y += sprintf(buffer+len+y, " {MaxRepSz=%d} {MaxRepDepth=%d}\n", - ioc->facts.CurReplyFrameSize, - ioc->facts.ReplyQueueDepth); + if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { + printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n", + rc, ioc->name); + } - *size = y; + spin_lock_irqsave(&ioc->diagLock, flags); + ioc->diagPending = 0; + if (ioc->alt_ioc) + ioc->alt_ioc->diagPending = 0; + spin_unlock_irqrestore(&ioc->diagLock, flags); + + dprintk((MYIOC_s_INFO_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); + + return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -3268,7 +4938,7 @@ ds = "External Bus Reset"; break; case MPI_EVENT_RESCAN: - ds = "Bus Rescan Event"; + ds = "Bus Rescan Event"; /* Ok, do we need to do 
anything here? As far as I can tell, this is when a new device gets added to the loop. */ @@ -3296,6 +4966,9 @@ else ds = "Events(OFF) Change"; break; + case MPI_EVENT_INTEGRATED_RAID: + ds = "Integrated Raid"; + break; /* * MPT base "custom" events may be added here... */ @@ -3307,7 +4980,7 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * ProcessEventNotification - Route a received EventNotificationReply to * all currently regeistered event handlers. * @ioc: Pointer to MPT_ADAPTER structure @@ -3322,7 +4995,7 @@ u16 evDataLen; u32 evData0 = 0; // u32 evCtx; - int i; + int ii; int r = 0; int handlers = 0; char *evStr; @@ -3339,15 +5012,15 @@ } evStr = EventDescriptionStr(event, evData0); - dprintk((KERN_INFO MYNAM ": %s: MPT event (%s=%02Xh) detected!\n", + dprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n", ioc->name, evStr, event)); #if defined(MPT_DEBUG) || defined(MPT_DEBUG_EVENTS) printk(KERN_INFO MYNAM ": Event data:\n" KERN_INFO); - for (i = 0; i < evDataLen; i++) - printk(" %08x", le32_to_cpu(pEventReply->Data[i])); + for (ii = 0; ii < evDataLen; ii++) + printk(" %08x", le32_to_cpu(pEventReply->Data[ii])); printk("\n"); #endif @@ -3365,6 +5038,8 @@ case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */ case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */ case MPI_EVENT_LOGOUT: /* 09 */ + case MPI_EVENT_INTEGRATED_RAID: /* 0B */ + case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */ default: break; case MPI_EVENT_EVENT_CHANGE: /* 0A */ @@ -3382,13 +5057,36 @@ } /* + * Should this event be logged? Events are written sequentially. + * When buffer is full, start again at the top. 
+ */ + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { + int idx; + + idx = ioc->eventContext % ioc->eventLogSize; + + ioc->events[idx].event = event; + ioc->events[idx].eventContext = ioc->eventContext; + + for (ii = 0; ii < 2; ii++) { + if (ii < evDataLen) + ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]); + else + ioc->events[idx].data[ii] = 0; + } + + ioc->eventContext++; + } + + + /* * Call each currently registered protocol event handler. */ - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptEvHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Routing Event to event handler #%d\n", - ioc->name, i)); - r += (*(MptEvHandlers[i]))(ioc, pEventReply); + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptEvHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Routing Event to event handler #%d\n", + ioc->name, ii)); + r += (*(MptEvHandlers[ii]))(ioc, pEventReply); handlers++; } } @@ -3398,7 +5096,9 @@ * If needed, send (a single) EventAck. */ if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) { - if ((i = SendEventAck(ioc, pEventReply)) != 0) { + if ((ii = SendEventAck(ioc, pEventReply)) != 0) { + printk(MYIOC_s_WARN_FMT "SendEventAck returned %d\n", + ioc->name, ii); } } @@ -3427,7 +5127,7 @@ switch(log_info) { /* FCP Initiator */ - case MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME: + case MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME: desc = "Received an out of order frame - unsupported"; break; case MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME: @@ -3483,7 +5183,7 @@ desc = "Not sent because login to remote node not validated"; break; case MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND: - desc = "Cleared from the outbound after a logout"; + desc = "Cleared from the outbound queue after a logout"; break; case MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN: desc = "Cleared waiting for data after a logout"; @@ -3516,7 +5216,7 @@ break; } - printk(KERN_INFO MYNAM ": %s: LogInfo(0x%08x): SubCl={%s}", + printk(MYIOC_s_INFO_FMT 
"LogInfo(0x%08x): SubCl={%s}", ioc->name, log_info, subcl_str[subcl]); if (SubCl == MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET) printk(", byte_offset=%d\n", log_info & MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET); @@ -3539,7 +5239,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info) { /* FIXME! */ - printk(KERN_INFO MYNAM ": %s: LogInfo(0x%08x)\n", ioc->name, log_info); + printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x)\n", ioc->name, log_info); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -3553,7 +5253,7 @@ * Specialized driver registration routine for the isense driver. */ int -mpt_register_ascqops_strings(/*ASCQ_Table_t*/void *ascqTable, int ascqtbl_sz, const char **opsTable) +mpt_register_ascqops_strings(void *ascqTable, int ascqtbl_sz, const char **opsTable) { int r = 0; @@ -3562,6 +5262,7 @@ mpt_ASCQ_TableSz = ascqtbl_sz; mpt_ScsiOpcodesPtr = opsTable; printk(KERN_INFO MYNAM ": English readable SCSI-3 strings enabled:-)\n"); + isense_idx = last_drv_idx; r = 1; } MOD_INC_USE_COUNT; @@ -3582,11 +5283,15 @@ mpt_ASCQ_TableSz = 0; mpt_ScsiOpcodesPtr = NULL; printk(KERN_INFO MYNAM ": English readable SCSI-3 strings disabled)-:\n"); + isense_idx = -1; MOD_DEC_USE_COUNT; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +EXPORT_SYMBOL(mpt_adapters); +EXPORT_SYMBOL(mpt_proc_root_dir); +EXPORT_SYMBOL(DmpService); EXPORT_SYMBOL(mpt_register); EXPORT_SYMBOL(mpt_deregister); EXPORT_SYMBOL(mpt_event_register); @@ -3597,12 +5302,16 @@ EXPORT_SYMBOL(mpt_put_msg_frame); EXPORT_SYMBOL(mpt_free_msg_frame); EXPORT_SYMBOL(mpt_send_handshake_request); +EXPORT_SYMBOL(mpt_handshake_req_reply_wait); EXPORT_SYMBOL(mpt_adapter_find_first); EXPORT_SYMBOL(mpt_adapter_find_next); EXPORT_SYMBOL(mpt_verify_adapter); +EXPORT_SYMBOL(mpt_GetIocState); EXPORT_SYMBOL(mpt_print_ioc_summary); EXPORT_SYMBOL(mpt_lan_index); EXPORT_SYMBOL(mpt_stm_index); +EXPORT_SYMBOL(mpt_HardResetHandler); +EXPORT_SYMBOL(mpt_config); 
EXPORT_SYMBOL(mpt_register_ascqops_strings); EXPORT_SYMBOL(mpt_deregister_ascqops_strings); @@ -3611,12 +5320,13 @@ EXPORT_SYMBOL(mpt_ScsiOpcodesPtr); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_init - Fusion MPT base driver initialization routine. * * Returns 0 for success, non-zero for failure. */ -int __init fusion_init(void) +int __init +fusion_init(void) { int i; @@ -3636,12 +5346,22 @@ MptResetHandlers[i] = NULL; } + DmpService = NULL; + /* NEW! 20010120 -sralston * Register ourselves (mptbase) in order to facilitate * EventNotification handling. */ mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); + /* Register for hard reset handling callbacks. + */ + if (mpt_reset_register(mpt_base_index, mpt_ioc_reset) == 0) { + dprintk((KERN_INFO MYNAM ": Register for IOC reset notification\n")); + } else { + /* FIXME! */ + } + if ((i = mpt_pci_scan()) < 0) return i; @@ -3649,13 +5369,14 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_exit - Perform driver unload cleanup. * * This routine frees all resources associated with each MPT adapter * and removes all %MPT_PROCFS_MPTBASEDIR entries. */ -static void fusion_exit(void) +static void +fusion_exit(void) { MPT_ADAPTER *this; @@ -3665,7 +5386,7 @@ * Moved this *above* removal of all MptAdapters! */ #ifdef CONFIG_PROC_FS - procmpt_destroy(); + (void) procmpt_destroy(); #endif while (! 
Q_IS_EMPTY(&MptAdapters)) { @@ -3673,6 +5394,8 @@ Q_DEL_ITEM(this); mpt_adapter_dispose(this); } + + mpt_reset_deregister(mpt_base_index); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h --- a/drivers/message/fusion/mptbase.h Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/mptbase.h Fri Apr 26 00:01:26 2002 @@ -8,11 +8,12 @@ * Credits: * (see mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptbase.h,v 1.46.2.2.2.2 2001/09/18 03:22:29 sralston Exp $ + * $Id: mptbase.h,v 1.103 2002/02/27 20:24:38 pdelaney Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -55,6 +56,7 @@ /*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include "linux_compat.h" /* linux-2.2.x (vs. 
-2.4.x) tweaks */ +#include "scsi3.h" /* SCSI defines */ #include "lsi/mpi_type.h" #include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */ @@ -62,6 +64,7 @@ #include "lsi/mpi_cnfg.h" /* IOC configuration support */ #include "lsi/mpi_init.h" /* SCSI Host (initiator) protocol support */ #include "lsi/mpi_lan.h" /* LAN over FC protocol support */ +#include "lsi/mpi_raid.h" /* Integrated Mirroring support */ #include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */ #include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */ @@ -74,11 +77,11 @@ #endif #ifndef COPYRIGHT -#define COPYRIGHT "Copyright (c) 1999-2001 " MODULEAUTHOR +#define COPYRIGHT "Copyright (c) 1999-2002 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "1.02.02" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-1.02.02" +#define MPT_LINUX_VERSION_COMMON "2.00.11" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-2.00.11" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -89,39 +92,77 @@ * Fusion MPT(linux) driver configurable stuff... */ #define MPT_MAX_ADAPTERS 16 -#define MPT_MAX_PROTOCOL_DRIVERS 8 +#define MPT_MAX_PROTOCOL_DRIVERS 16 +#define MPT_MAX_BUS 1 #define MPT_MAX_FC_DEVICES 255 +#define MPT_MAX_SCSI_DEVICES 16 +#define MPT_LAST_LUN 31 +#define MPT_SENSE_BUFFER_ALLOC 64 + /* allow for 256 max sense alloc, but only 255 max request */ +#if MPT_SENSE_BUFFER_ALLOC >= 256 +# undef MPT_SENSE_BUFFER_ALLOC +# define MPT_SENSE_BUFFER_ALLOC 256 +# define MPT_SENSE_BUFFER_SIZE 255 +#else +# define MPT_SENSE_BUFFER_SIZE MPT_SENSE_BUFFER_ALLOC +#endif -#define MPT_MISCDEV_BASENAME "mptctl" -#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME +#define MPT_NAME_LENGTH 32 #define MPT_PROCFS_MPTBASEDIR "mpt" /* chg it to "driver/fusion" ? 
*/ -#define MPT_PROCFS_SUMMARY_NODE MPT_PROCFS_MPTBASEDIR "/summary" -#define MPT_PROCFS_SUMMARY_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_NODE -#define MPT_FW_REV_MAGIC_ID_STRING "FwRev=" +#define MPT_PROCFS_SUMMARY_ALL_NODE MPT_PROCFS_MPTBASEDIR "/summary" +#define MPT_PROCFS_SUMMARY_ALL_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_ALL_NODE +#define MPT_FW_REV_MAGIC_ID_STRING "FwRev=" -#ifdef __KERNEL__ /* { */ #define MPT_MAX_REQ_DEPTH 1023 -#define MPT_REQ_DEPTH 256 +#define MPT_DEFAULT_REQ_DEPTH 256 #define MPT_MIN_REQ_DEPTH 128 #define MPT_MAX_REPLY_DEPTH MPT_MAX_REQ_DEPTH -#define MPT_REPLY_DEPTH 128 +#define MPT_DEFAULT_REPLY_DEPTH 128 #define MPT_MIN_REPLY_DEPTH 8 #define MPT_MAX_REPLIES_PER_ISR 32 #define MPT_MAX_FRAME_SIZE 128 -#define MPT_REQ_SIZE 128 -#define MPT_REPLY_SIZE 128 +#define MPT_DEFAULT_FRAME_SIZE 128 -#define MPT_SG_BUCKETS_PER_HUNK 1 +#define MPT_SG_REQ_128_SCALE 1 +#define MPT_SG_REQ_96_SCALE 2 +#define MPT_SG_REQ_64_SCALE 4 -#ifdef MODULE -#define MPT_REQ_DEPTH_RANGE_STR __MODULE_STRING(MPT_MIN_REQ_DEPTH) "-" __MODULE_STRING(MPT_MAX_REQ_DEPTH) -#define MPT_REPLY_DEPTH_RANGE_STR __MODULE_STRING(MPT_MIN_REPLY_DEPTH) "-" __MODULE_STRING(MPT_MAX_REPLY_DEPTH) -#define MPT_REPLY_SIZE_RANGE_STR __MODULE_STRING(MPT_MIN_REPLY_SIZE) "-" __MODULE_STRING(MPT_MAX_FRAME_SIZE) -#endif +#define CAN_SLEEP 1 +#define NO_SLEEP 0 + +/* + * SCSI transfer rate defines. + */ +#define MPT_ULTRA320 0x08 +#define MPT_ULTRA160 0x09 +#define MPT_ULTRA2 0x0A +#define MPT_ULTRA 0x0C +#define MPT_FAST 0x19 +#define MPT_SCSI 0x32 +#define MPT_ASYNC 0xFF + +#define MPT_NARROW 0 +#define MPT_WIDE 1 + +#ifdef __KERNEL__ /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Attempt semi-consistent error & warning msgs across + * MPT drivers. NOTE: Users of these macro defs must + * themselves define their own MYNAM. 
+ */ +#define MYIOC_s_INFO_FMT KERN_INFO MYNAM ": %s: " +#define MYIOC_s_NOTE_FMT KERN_NOTICE MYNAM ": %s: " +#define MYIOC_s_WARN_FMT KERN_WARNING MYNAM ": %s: WARNING - " +#define MYIOC_s_ERR_FMT KERN_ERR MYNAM ": %s: ERROR - " /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -133,6 +174,7 @@ MPTSCSIH_DRIVER, /* MPT SCSI host (initiator) class */ MPTLAN_DRIVER, /* MPT LAN class */ MPTSTM_DRIVER, /* MPT SCSI target mode class */ + MPTDMP_DRIVER, /* MPT Dynamic Multi-pathing class */ MPTUNKNOWN_DRIVER } MPT_DRIVER_CLASS; @@ -145,10 +187,21 @@ struct _MPT_FRAME_HDR *forw; struct _MPT_FRAME_HDR *back; u32 arg1; + u32 pad; void *argp1; +#ifndef MPT_SCSI_USE_NEW_EH + void *argp2; +#endif } linkage; /* - * NOTE: On non-32-bit systems, where pointers are LARGE, + * NOTE: When request frames are free, on the linkage structure + * contets are valid. All other values are invalid. + * In particular, do NOT reply on offset [2] + * (in words) being the * message context. + * The message context must be reset (computed via base address + * + an offset) prior to issuing any command. + * + * NOTE2: On non-32-bit systems, where pointers are LARGE, * using the linkage pointers destroys our sacred MsgContext * field contents. But we don't care anymore because these * are now reset in mpt_put_msg_frame() just prior to sending @@ -169,6 +222,12 @@ } fld; } msgctxu; } hwhdr; + /* + * Remark: 32 bit identifier: + * 31-24: reserved + * 23-16: call back index + * 15-0 : request index + */ } MPT_FRAME_TRACKER; /* @@ -189,6 +248,11 @@ } u; } MPT_FRAME_HDR; +#define MPT_REQ_MSGFLAGS_DROPME 0x80 + +/* Used for tracking the free request frames + * and free reply frames. 
+ */ typedef struct _MPT_Q_TRACKER { MPT_FRAME_HDR *head; MPT_FRAME_HDR *tail; @@ -214,9 +278,20 @@ struct _Q_ITEM *tail; } Q_TRACKER; +typedef struct _MPT_DONE_Q { + struct _MPT_DONE_Q *forw; + struct _MPT_DONE_Q *back; + void *argp; +} MPT_DONE_Q; + +typedef struct _DONE_Q_TRACKER { + MPT_DONE_Q *head; + MPT_DONE_Q *tail; +} DONE_Q_TRACKER; /* - * Chip-specific stuff... + * Chip-specific stuff... FC929 delineates break between + * FC and Parallel SCSI parts. Do NOT re-order. */ typedef enum { @@ -237,7 +312,9 @@ u32 WriteSequence; /* 04 Write Sequence register */ u32 Diagnostic; /* 08 Diagnostic register */ u32 TestBase; /* 0C Test Base Address */ - u32 Reserved1[8]; /* 10-2F reserved for future use */ + u32 DiagRwData; /* 10 Read Write Data (fw download) */ + u32 DiagRwAddress; /* 14 Read Write Address (fw download)*/ + u32 Reserved1[6]; /* 18-2F reserved for future use */ u32 IntStatus; /* 30 Interrupt Status */ u32 IntMask; /* 34 Interrupt Mask */ u32 Reserved2[2]; /* 38-3F reserved for future use */ @@ -256,60 +333,271 @@ */ +/* + * Dynamic Multi-Pathing specific stuff... 
+ */ +#define DMP_MAX_PATHS 8 + +typedef struct _PathInfo { + u8 ioc; + u8 target; + u8 pad; + u8 pflags; +} PathInfo; + +#define PATHINFO_FLAGS_OWNED 0x01 +#define PATHINFO_FLAGS_EXISTS 0x02 +#define PATHINFO_FLAGS_AVAILABLE 0x04 +#define PATHINFO_FLAGS_SECONDARY 0x08 + +#define PFLAGS_EXISTS_AND_AVAIL (PATHINFO_FLAGS_EXISTS|PATHINFO_FLAGS_AVAILABLE) +#define PFLAGS_AVAIL_AND_OWNED (PATHINFO_FLAGS_AVAILABLE|PATHINFO_FLAGS_OWNED) + +typedef struct _ScsiCmndTracker { + void *head; + void *tail; +} ScsiCmndTracker; + + +/* + * VirtDevice - FC LUN device or SCSI target device + * (used to be FCSCSI_TARGET) + */ +typedef struct _VirtDevice { + struct _VirtDevice *forw; + struct _VirtDevice *back; + rwlock_t VdevLock; + int ref_cnt; + u8 tflags; + u8 ioc_id; + u8 target_id; + u8 bus_id; + u8 minSyncFactor; /* 0xFF is async */ + u8 maxOffset; /* 0 if async */ + u8 maxWidth; /* 0 if narrow, 1 if wide*/ + u8 negoFlags; /* 0 if WDTR/SDTR allowed */ + u8 raidVolume; /* set, if RAID Volume */ + u8 rsvd; /* alignment */ + u16 rsvd1raid; + int npaths; + u16 fc_phys_lun; + u16 fc_xlat_lun; + int stall_detected; + PathInfo path[DMP_MAX_PATHS]; + struct timer_list stall_timer; + struct timer_list retry_timer; + struct timer_list gone_timer; + ScsiCmndTracker WaitQ; + ScsiCmndTracker SentQ; + ScsiCmndTracker DoneQ; +//--- LUN split here? + u8 sense[SCSI_STD_SENSE_BYTES]; /* 18 */ + u8 rsvd2[2]; /* alignment */ + u32 luns; /* Max LUNs is 32 */ + u8 inq_data[SCSI_STD_INQUIRY_BYTES]; /* 36 */ + u8 pad0[4]; + u8 uniq_prepad[8]; + u8 inq00_data[20]; + u8 pad1[4]; + /* IEEE Registered Extended Identifier + obtained via INQUIRY VPD page 0x83 */ + u8 uniq_data[20]; + u8 pad2[4]; + u8 inqC3_data[12]; + u8 pad3[4]; + u8 inqC9_data[12]; + u8 pad4[4]; + u8 dev_vol_name[64]; +} VirtDevice; + +/* + * Fibre Channel (SCSI) target device and associated defines... 
+ */ +#define MPT_TARGET_DEFAULT_DV_STATUS 0 +#define MPT_TARGET_FLAGS_VALID_NEGO 0x01 +#define MPT_TARGET_FLAGS_VALID_INQUIRY 0x02 +#define MPT_TARGET_FLAGS_VALID_SENSE 0x04 +#define MPT_TARGET_FLAGS_Q_YES 0x08 + +#define MPT_TARGET_NO_NEGO_WIDE 0x01 +#define MPT_TARGET_NO_NEGO_SYNC 0x02 + +typedef struct _VirtDevTracker { + struct _VirtDevice *head; + struct _VirtDevice *tail; + rwlock_t VlistLock; + int pad; +} VirtDevTracker; + + +/* + * /proc/mpt interface + */ +typedef struct { + const char *name; + mode_t mode; + int pad; + read_proc_t *read_proc; + write_proc_t *write_proc; +} mpt_proc_entry_t; + +#define MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len) \ +do { \ + len -= offset; \ + if (len < request) { \ + *eof = 1; \ + if (len <= 0) \ + return 0; \ + } else \ + len = request; \ + *start = buf + offset; \ + return len; \ +} while (0) + + +/* + * IOCTL structure and associated defines + */ + +#define MPT_IOCTL_STATUS_DID_TIMEOUT 0x01 /* The current IOCTL timed out */ +#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ +#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */ +#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */ +#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ + +typedef struct _MPT_IOCTL { + struct _MPT_ADAPTER *ioc; + struct timer_list timer; /* timer function for this adapter */ + u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ + u8 sense[MPT_SENSE_BUFFER_ALLOC]; + int wait_done; /* wake-up value for this ioc */ + u8 cmd; /* current command */ + u8 status; /* current command status */ + u8 pad[2]; +} MPT_IOCTL; + +/* + * Event Structure and define + */ +#define MPTCTL_EVENT_LOG_SIZE (0x0000000A) +typedef struct _mpt_ioctl_events { + u32 event; /* Specified by define above */ + u32 eventContext; /* Index or counter */ + int data[2]; /* First 8 bytes of Event Data */ +} MPT_IOCTL_EVENTS; + +/* + * CONFIGPARM status defines + */ +#define MPT_CONFIG_GOOD 
MPI_IOCSTATUS_SUCCESS +#define MPT_CONFIG_ERROR 0x002F + +/* + * Substructure to store SCSI specific configuration page data + */ +#define MPT_SCSICFG_NEGOTIATE 0x01 /* Negotiate on next IO */ +#define MPT_SCSICFG_NEED_DV 0x02 /* Schedule DV */ +#define MPT_SCSICFG_DV_PENDING 0x04 /* DV on this physical id pending */ +#define MPT_SCSICFG_DV_DONE 0x08 /* DV on this physical id complete */ + +#define MPT_SCSICFG_USE_NVRAM 0x01 /* WriteSDP1 using NVRAM */ +#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */ + +typedef struct _ScsiCfgData { + int *nvram; /* table of device NVRAM values */ + IOCPage3_t *pIocPg3; /* table of physical disks */ + u8 dvStatus[MPT_MAX_SCSI_DEVICES]; + int isRaid; /* bit field, 1 if RAID */ + u8 minSyncFactor; /* 0xFF if async */ + u8 maxSyncOffset; /* 0 if async */ + u8 maxBusWidth; /* 0 if narrow, 1 if wide */ + u8 busType; /* SE, LVD, HD */ + u8 sdp1version; /* SDP1 version */ + u8 sdp1length; /* SDP1 length */ + u8 sdp0version; /* SDP0 version */ + u8 sdp0length; /* SDP0 length */ + u8 dvScheduled; /* 1 if scheduled */ + u8 forceDv; /* 1 to force DV scheduling */ + u8 rsvd[2]; +} ScsiCfgData; + +/* + * Adapter Structure - pci_dev specific. 
Maximum: MPT_MAX_ADAPTERS + */ typedef struct _MPT_ADAPTER { struct _MPT_ADAPTER *forw; struct _MPT_ADAPTER *back; - int id; /* Unique adapter id {0,1,2,...} */ - int pci_irq; - char name[32]; /* "iocN" */ + int id; /* Unique adapter id N {0,1,2,...} */ + int pci_irq; /* This irq */ + char name[MPT_NAME_LENGTH]; /* "iocN" */ char *prod_name; /* "LSIFC9x9" */ - u32 mem_phys; /* == f4020000 (mmap) */ volatile SYSIF_REGS *chip; /* == c8817000 (mmap) */ - CHIP_TYPE chip_type; - int mem_size; + volatile SYSIF_REGS *pio_chip; /* Programmed IO (downloadboot) */ + u32 mem_phys; /* == f4020000 (mmap) */ + u32 pio_mem_phys; /* Programmed IO (downloadboot) */ + int mem_size; /* mmap memory size */ int alloc_total; u32 last_state; int active; - int sod_reset; - unsigned long last_kickstart; - u8 *reply_alloc; /* Reply frames alloc ptr */ + u8 *reply_alloc; /* Reply frames alloc ptr */ dma_addr_t reply_alloc_dma; - MPT_FRAME_HDR *reply_frames; /* Reply frames - rounded up! */ + MPT_FRAME_HDR *reply_frames; /* Reply msg frames - rounded up! */ dma_addr_t reply_frames_dma; - int reply_depth; - int reply_sz; + u32 reply_frames_low_dma; + int reply_depth; /* Num Allocated reply frames */ + int reply_sz; /* Reply frame size */ + CHIP_TYPE chip_type; /* We (host driver) get to manage our own RequestQueue! */ - u8 *req_alloc; /* Request frames alloc ptr */ + u8 *req_alloc; /* Request frames alloc ptr */ dma_addr_t req_alloc_dma; - MPT_FRAME_HDR *req_frames; /* Request msg frames for PULL mode! */ + MPT_FRAME_HDR *req_frames; /* Request msg frames - rounded up! */ dma_addr_t req_frames_dma; - int req_depth; - int req_sz; - MPT_Q_TRACKER FreeQ; + u32 req_frames_low_dma; + int req_depth; /* Number of request frames */ + int req_sz; /* Request frame size (bytes) */ spinlock_t FreeQlock; + MPT_Q_TRACKER FreeQ; /* Pool of SCSI sense buffers for commands coming from * the SCSI mid-layer. We have one 256 byte sense buffer * for each REQ entry. 
*/ u8 *sense_buf_pool; dma_addr_t sense_buf_pool_dma; - struct pci_dev *pcidev; -/* atomic_t userCnt; */ - u8 *memmap; + u32 sense_buf_low_dma; int mtrr_reg; - struct Scsi_Host *sh; + void *pcidev; /* struct pci_dev pointer */ + u8 *memmap; /* mmap address */ + struct Scsi_Host *sh; /* Scsi Host pointer */ + ScsiCfgData spi_data; /* Scsi config. data */ + MPT_IOCTL *ioctl; /* ioctl data pointer */ struct proc_dir_entry *ioc_dentry; - struct _MPT_ADAPTER *alt_ioc; + struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ + spinlock_t diagLock; /* diagnostic reset lock */ + int diagPending; + u32 biosVersion; /* BIOS version from IO Unit Page 2 */ + int eventTypes; /* Event logging parameters */ + int eventContext; /* Next event context */ + int eventLogSize; /* Max number of cached events */ + struct _mpt_ioctl_events *events; /* pointer to event log */ + u8 *FWImage; /* Pointer to FW */ + dma_addr_t FWImage_dma; + Q_TRACKER configQ; /* linked list of config. requests */ int hs_reply_idx; +#ifndef MFCNT + u32 pad0; +#else + u32 mfcnt; +#endif u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)]; u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)]; IOCFactsReply_t facts; PortFactsReply_t pfacts[2]; + FCPortPage0_t fc_port_page0[2]; LANPage0_t lan_cnfg_page0; LANPage1_t lan_cnfg_page1; u8 FirstWhoInit; - u8 pad1[3]; + u8 pad1[7]; } MPT_ADAPTER; @@ -324,7 +612,6 @@ * 0 = not Ok ... */ typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); - typedef int (*MPT_EVHANDLER)(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply); typedef int (*MPT_RESETHANDLER)(MPT_ADAPTER *ioc, int reset_phase); /* reset_phase defs */ @@ -344,6 +631,47 @@ #define MPT_HOSTEVENT_IOC_BRINGUP 0x91 #define MPT_HOSTEVENT_IOC_RECOVER 0x92 +/* 32 vs 64 bit SGL code. 
+ * + */ +#if defined(__ia64__) +typedef SGESimple64_t MptSge_t; +typedef SGEChain64_t MptChain_t; + +#define cpu_to_leXX(y, p) { \ + u32 low = (u32) (y & 0xFFFFFFFF); \ + u32 high = (u32) (y >> 32); \ + p.Low = cpu_to_le32(low); \ + p.High = cpu_to_le32(high); \ +} + +#define leXX_to_cpu(y, p) { \ + y = (dma_addr_t) le32_to_cpu(p.High); \ + y = (y << 32); \ + y |= le32_to_cpu(p.Low); \ +} + +#define MPT_SGE_ADDRESS_SIZE MPI_SGE_FLAGS_64_BIT_ADDRESSING +#define MPT_SCSIIO_MSG_FLAGS MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 + + +#else + +typedef SGESimple32_t MptSge_t; +typedef SGEChain32_t MptChain_t; +#define cpu_to_leXX(y,p) { \ + p = cpu_to_le32(y); \ +} + +#define leXX_to_cpu(y,p) { \ + y = le32_to_cpu(p); \ +} + +#define MPT_SGE_ADDRESS_SIZE MPI_SGE_FLAGS_32_BIT_ADDRESSING +#define MPT_SCSIIO_MSG_FLAGS MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 + +#endif + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Funky (private) macros... @@ -360,7 +688,8 @@ #define dhsprintk(x) #endif -#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME) +//#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME) +#if defined(MPT_DEBUG_MSG_FRAME) #define dmfprintk(x) printk x #else #define dmfprintk(x) @@ -372,24 +701,35 @@ #define dirqprintk(x) #endif -#ifdef MPT_DEBUG_EVENTS -#define deventprintk(x) printk x +#ifdef MPT_DEBUG_SG +#define dsgprintk(x) printk x #else -#define deventprintk(x) +#define dsgprintk(x) #endif -#ifdef MPT_DEBUG_SPINLOCK -#define dslprintk(x) printk x +#ifdef MPT_DEBUG_DV +#define ddvprintk(x) printk x #else -#define dslprintk(x) +#define ddvprintk(x) #endif -#ifdef MPT_DEBUG_SG -#define dsgprintk(x) printk x +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) +#define ddvtprintk(x) printk x #else -#define dsgprintk(x) +#define ddvtprintk(x) #endif +#ifdef MPT_DEBUG_IOCTL +#define dctlprintk(x) printk x +#else +#define dctlprintk(x) +#endif + +#ifdef MPT_DEBUG_RESET +#define dtmprintk(x) printk x +#else +#define dtmprintk(x) +#endif 
#define MPT_INDEX_2_MFPTR(ioc,idx) \ (MPT_FRAME_HDR*)( (u8*)(ioc)->req_frames + (ioc)->req_sz * (idx) ) @@ -397,6 +737,9 @@ #define MFPTR_2_MPT_INDEX(ioc,mf) \ (int)( ((u8*)mf - (u8*)(ioc)->req_frames) / (ioc)->req_sz ) +#define MPT_INDEX_2_RFPTR(ioc,idx) \ + (MPT_FRAME_HDR*)( (u8*)(ioc)->reply_frames + (ioc)->req_sz * (idx) ) + #define Q_INIT(q,type) (q)->head = (q)->tail = (type*)(q) #define Q_IS_EMPTY(q) ((Q_ITEM*)(q)->head == (Q_ITEM*)(q)) @@ -425,7 +768,6 @@ _forw->back = _back; \ } - #define SWAB4(value) \ (u32)( (((value) & 0x000000ff) << 24) \ | (((value) & 0x0000ff00) << 8) \ @@ -457,64 +799,143 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -#endif /* } __KERNEL__ */ +/* + * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers + * Private to the driver. + */ +/* LOCAL structure and fields used when processing + * internally generated commands. These include: + * bus scan, dv and config requests. + */ +typedef struct _MPT_LOCAL_REPLY { + ConfigPageHeader_t header; + int completion; + u8 sense[SCSI_STD_SENSE_BYTES]; + u8 scsiStatus; + u8 skip; + u32 pad; +} MPT_LOCAL_REPLY; + +#define MPT_HOST_BUS_UNKNOWN (0xFF) +#define MPT_HOST_TOO_MANY_TM (0x05) +#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) +#define MPT_HOST_NO_CHAIN (0xFFFFFFFF) +#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF) +#define MPT_NVRAM_SYNC_MASK (0x0000FF00) +#define MPT_NVRAM_SYNC_SHIFT (8) +#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000) +#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000) +#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000) +#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000) +#define MPT_NVRAM_WIDE_DISABLE (0x00100000) +#define MPT_NVRAM_BOOT_CHOICE (0x00200000) + +typedef struct _MPT_SCSI_HOST { + MPT_ADAPTER *ioc; + int port; + u32 pad0; + struct scsi_cmnd **ScsiLookup; + /* Pool of buffers for chaining. ReqToChain + * and ChainToChain track index of chain buffers. + * ChainBuffer (DMA) virt/phys addresses. 
+ * FreeChainQ (lock) locking mechanisms. + */ + int *ReqToChain; + int *ChainToChain; + u8 *ChainBuffer; + dma_addr_t ChainBufferDMA; + MPT_Q_TRACKER FreeChainQ; + spinlock_t FreeChainQlock; + u32 qtag_tick; + VirtDevice **Targets; + MPT_LOCAL_REPLY *pLocal; /* used for internal commands */ + struct timer_list timer; + struct timer_list TMtimer; /* Timer for TM commands ONLY */ + /* Pool of memory for holding SCpnts before doing + * OS callbacks. freeQ is the free pool. + */ + u8 *memQ; + DONE_Q_TRACKER freeQ; + DONE_Q_TRACKER doneQ; /* Holds Linux formmatted requests */ + DONE_Q_TRACKER pendingQ; /* Holds MPI formmatted requests */ + MPT_Q_TRACKER taskQ; /* TM request Q */ + spinlock_t freedoneQlock; + int taskQcnt; + u8 numTMrequests; + u8 tmPending; + u8 resetPending; + u8 is_spi; /* Parallel SCSI i/f */ + u8 negoNvram; /* DV disabled, nego NVRAM */ + u8 is_multipath; /* Multi-path compatible */ + u8 rsvd[2]; + MPT_FRAME_HDR *tmPtr; /* Ptr to TM request*/ + MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ + struct scsi_cmnd *abortSCpnt; + MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ +} MPT_SCSI_HOST; + +/* + * Structure for overlaying onto scsi_cmnd->SCp area + * NOTE: SCp area is 36 bytes min, 44 bytes max? + */ +typedef struct _scPrivate { + struct scsi_cmnd *forw; + struct scsi_cmnd *back; + void *p1; + void *p2; + u8 io_path_id; /* DMP */ + u8 pad[7]; +} scPrivate; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * More Dynamic Multi-Pathing stuff... 
+ */ + +/* Forward decl, a strange C thing, to prevent gcc compiler warnings */ +struct scsi_cmnd; /* - * MPT Control IOCTLs and structures + * DMP service layer structure / API interface */ -#define MPT_MAGIC_NUMBER 'm' -#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w) -#define MPTRWPERF_CHK _IOR(MPT_MAGIC_NUMBER,13,struct mpt_raw_r_w) -#define MPTRWPERF_RESET _IOR(MPT_MAGIC_NUMBER,14,struct mpt_raw_r_w) -#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer) -#define MPTSCSICMD _IOWR(MPT_MAGIC_NUMBER,16,struct mpt_scsi_cmd) - -/* - * Define something *vague* enough that caller doesn't - * really need to know anything about device parameters - * (blk_size, capacity, etc.) - */ -struct mpt_raw_r_w { - unsigned int iocnum; /* IOC unit number */ - unsigned int port; /* IOC port number */ - unsigned int target; /* SCSI Target */ - unsigned int lun; /* SCSI LUN */ - unsigned int iters; /* N iterations */ - unsigned short nblks; /* number of blocks per IO */ - unsigned short qdepth; /* max Q depth on this device */ - unsigned char range; /* 0-100% of FULL disk capacity, 0=use (nblks X iters) */ - unsigned char skip; /* % of disk to skip */ - unsigned char rdwr; /* 0-100%, 0=pure ReaDs, 100=pure WRites */ - unsigned char seqran; /* 0-100%, 0=pure SEQential, 100=pure RANdom */ - unsigned int cache_sz; /* In Kb! 
Optimize hits to N Kb cache size */ -}; - -struct mpt_fw_xfer { - unsigned int iocnum; /* IOC unit number */ -/* u8 flags;*/ /* Message flags - bit field */ - unsigned int fwlen; - void *bufp; /* Pointer to firmware buffer */ -}; - -struct mpt_scsi_cmd { - unsigned int iocnum; /* IOC unit number */ - unsigned int port; /* IOC port number */ - unsigned int target; /* SCSI Target */ - unsigned int lun; /* SCSI LUN */ - SCSIIORequest_t scsi_req; - SCSIIOReply_t scsi_reply; -}; - -struct mpt_ioctl_sanity { - unsigned int iocnum; -}; +typedef struct _DmpServices { + VirtDevTracker VdevList; + struct semaphore *Daemon; + int (*ScsiPathSelect) + (struct scsi_cmnd *, MPT_SCSI_HOST **hd, int *target, int *lun); + int (*DmpIoDoneChk) + (MPT_SCSI_HOST *, struct scsi_cmnd *, + SCSIIORequest_t *, + SCSIIOReply_t *); + void (*mptscsih_scanVlist) + (MPT_SCSI_HOST *, int portnum); + int (*ScsiAbort) + (struct scsi_cmnd *); + int (*ScsiBusReset) + (struct scsi_cmnd *); +} DmpServices_t; -#ifdef __KERNEL__ /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Generic structure passed to the base mpt_config function. + */ +typedef struct _x_config_parms { + Q_ITEM linkage; /* linked list */ + struct timer_list timer; /* timer function for this request */ + ConfigPageHeader_t *hdr; + dma_addr_t physAddr; + int wait_done; /* wait for this request */ + u32 pageAddr; /* properly formatted */ + u8 action; + u8 dir; + u8 timeout; /* seconds */ + u8 pad1; + u16 status; + u16 pad2; +} CONFIGPARMS; +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Public entry points... 
*/ @@ -524,21 +945,28 @@ extern void mpt_event_deregister(int cb_idx); extern int mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func); extern void mpt_reset_deregister(int cb_idx); -extern int mpt_register_ascqops_strings(/*ASCQ_Table_t*/void *ascqTable, int ascqtbl_sz, const char **opsTable); +extern int mpt_register_ascqops_strings(void *ascqTable, int ascqtbl_sz, const char **opsTable); extern void mpt_deregister_ascqops_strings(void); extern MPT_FRAME_HDR *mpt_get_msg_frame(int handle, int iocid); extern void mpt_free_msg_frame(int handle, int iocid, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame(int handle, int iocid, MPT_FRAME_HDR *mf); -extern int mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req); +extern int mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req, int sleepFlag); +extern int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait, int sleepFlag); extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); extern MPT_ADAPTER *mpt_adapter_find_first(void); extern MPT_ADAPTER *mpt_adapter_find_next(MPT_ADAPTER *prev); +extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); -extern void mpt_print_ioc_facts(MPT_ADAPTER *ioc, char *buf, int *size, int len); +extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); +extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); /* * Public data decl's... 
*/ +extern MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS]; +extern struct proc_dir_entry *mpt_proc_root_dir; +extern DmpServices_t *DmpService; + extern int mpt_lan_index; /* needed by mptlan.c */ extern int mpt_stm_index; /* needed by mptstm.c */ @@ -563,7 +991,7 @@ #define offsetof(t, m) ((size_t) (&((t *)0)->m)) #endif -#if defined(__alpha__) || defined(__sparc_v9__) +#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) #define CAST_U32_TO_PTR(x) ((void *)(u64)x) #define CAST_PTR_TO_U32(x) ((u32)(u64)x) #else @@ -576,6 +1004,40 @@ ((pflags) & MPI_PORTFACTS_PROTOCOL_TARGET) ? 'T' : 't', \ ((pflags) & MPI_PORTFACTS_PROTOCOL_LAN) ? 'L' : 'l', \ ((pflags) & MPI_PORTFACTS_PROTOCOL_LOGBUSADDR) ? 'B' : 'b' + +/* + * Shifted SGE Defines - Use in SGE with FlagsLength member. + * Otherwise, use MPI_xxx defines (refer to "lsi/mpi.h" header). + * Defaults: 32 bit SGE, SYSTEM_ADDRESS if direction bit is 0, read + */ +#define MPT_TRANSFER_IOC_TO_HOST (0x00000000) +#define MPT_TRANSFER_HOST_TO_IOC (0x04000000) +#define MPT_SGE_FLAGS_LAST_ELEMENT (0x80000000) +#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) +#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) +#define MPT_SGE_FLAGS_DIRECTION (0x04000000) +#define MPT_SGE_FLAGS_ADDRESSING (MPT_SGE_ADDRESS_SIZE << MPI_SGE_FLAGS_SHIFT) +#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) + +#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) +#define MPT_SGE_FLAGS_SIMPLE_ELEMENT (0x10000000) +#define MPT_SGE_FLAGS_CHAIN_ELEMENT (0x30000000) +#define MPT_SGE_FLAGS_ELEMENT_MASK (0x30000000) + +#define MPT_SGE_FLAGS_SSIMPLE_READ \ + (MPT_SGE_FLAGS_LAST_ELEMENT | \ + MPT_SGE_FLAGS_END_OF_BUFFER | \ + MPT_SGE_FLAGS_END_OF_LIST | \ + MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ + MPT_TRANSFER_IOC_TO_HOST) +#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ + (MPT_SGE_FLAGS_LAST_ELEMENT | \ + MPT_SGE_FLAGS_END_OF_BUFFER | \ + MPT_SGE_FLAGS_END_OF_LIST | \ + MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ + 
MPT_TRANSFER_HOST_TO_IOC) /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #endif diff -Nru a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c --- a/drivers/message/fusion/mptctl.c Fri Apr 26 00:01:26 2002 +++ b/drivers/message/fusion/mptctl.c Fri Apr 26 00:01:26 2002 @@ -9,6 +9,12 @@ * This driver would not exist if not for Alan Cox's development * of the linux i2o driver. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. + * * A huge debt of gratitude is owed to David S. Miller (DaveM) * for fixing much of the stupid and broken stuff in the early * driver while porting to sparc64 platform. THANK YOU! @@ -18,16 +24,17 @@ * (plus Eddie's other helpful hints and insights) * * Thanks to Arnaldo Carvalho de Melo for finding and patching - * a potential memory leak in mpt_ioctl_do_fw_download(), + * a potential memory leak in mptctl_do_fw_download(), * and for some kmalloc insight:-) * * (see also mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. 
Ralston, Noah Romer - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptctl.c,v 1.25.4.1 2001/08/24 20:07:06 sralston Exp $ + * $Id: mptctl.c,v 1.52 2002/02/27 18:44:24 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -69,7 +76,6 @@ #include #include #include -#include #include #include #include @@ -80,11 +86,16 @@ #include #include -#include +#include /* needed for access to Scsi_Host struct */ +#include +#include /* for io_request_lock (spinlock) decl */ +#include "../../scsi/scsi.h" +#include "../../scsi/hosts.h" #define COPYRIGHT "Copyright (c) 1999-2001 LSI Logic Corporation" -#define MODULEAUTHOR "Steven J. Ralston, Noah Romer" +#define MODULEAUTHOR "Steven J. Ralston, Noah Romer, Pamela Delaney" #include "mptbase.h" +#include "mptctl.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT misc device (ioctl) driver" @@ -96,21 +107,59 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptctl_id = -1; -static int rwperf_reset = 0; static struct semaphore mptctl_syscall_sem_ioc[MPT_MAX_ADAPTERS]; +static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static int mpt_ioctl_rwperf(unsigned long arg); -static int mpt_ioctl_rwperf_status(unsigned long arg); -static int mpt_ioctl_rwperf_reset(unsigned long arg); -static int mpt_ioctl_fw_download(unsigned long arg); -static int mpt_ioctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen); -static int mpt_ioctl_scsi_cmd(unsigned long arg); +struct buflist { + u8 *kptr; + int len; +}; + +/* + * Function prototypes. Called from OS entry point mptctl_ioctl. + * arg contents specific to function. 
+ */ +static int mptctl_fw_download(unsigned long arg); +static int mptctl_getiocinfo (unsigned long arg); +static int mptctl_gettargetinfo (unsigned long arg); +static int mptctl_readtest (unsigned long arg); +static int mptctl_mpt_command (unsigned long arg); +static int mptctl_eventquery (unsigned long arg); +static int mptctl_eventenable (unsigned long arg); +static int mptctl_eventreport (unsigned long arg); +static int mptctl_replace_fw (unsigned long arg); + +static int mptctl_do_reset(unsigned long arg); + +static int mptctl_compaq_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int mptctl_cpq_getpciinfo(unsigned long arg); +static int mptctl_cpq_getdriver(unsigned long arg); +static int mptctl_cpq_ctlr_status(unsigned long arg); +static int mptctl_cpq_target_address(unsigned long arg); +static int mptctl_cpq_passthru(unsigned long arg); +static int mptctl_compaq_scsiio(VENDOR_IOCTL_REQ *pVenReq, cpqfc_passthru_t *pPass); + +/* + * Private function calls. + */ +static int mptctl_do_mpt_command (struct mpt_ioctl_command karg, char *mfPtr, int local); +static int mptctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen); +static MptSge_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int sge_offset, int *frags, + struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); +static void kfree_sgl( MptSge_t *sgl, dma_addr_t sgl_dma, + struct buflist *buflist, MPT_ADAPTER *ioc); +static void mptctl_timer_expired (unsigned long data); + +/* + * Reset Handler cleanup function + */ +static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -133,26 +182,27 @@ /* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */ #define MAX_KMALLOC_SZ (128*1024) -struct buflist { - u8 *kptr; - int len; -}; - -#define myMAX_TARGETS (1<<4) -#define myMAX_LUNS (1<<3) -#define myMAX_T_MASK (myMAX_TARGETS-1) -#define myMAX_L_MASK (myMAX_LUNS-1) -static 
u8 DevInUse[myMAX_TARGETS][myMAX_LUNS] = {{0,0}}; -static u32 DevIosCount[myMAX_TARGETS][myMAX_LUNS] = {{0,0}}; +#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */ static u32 fwReplyBuffer[16]; static pMPIDefaultReply_t ReplyMsg = NULL; -/* some private forw protos */ -static SGESimple32_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int *frags, - struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); -static void kfree_sgl( SGESimple32_t *sgl, dma_addr_t sgl_dma, - struct buflist *buflist, MPT_ADAPTER *ioc); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Function to return 0 if the sge Address member is 0 and + * non-zero else. Used in the mpt_do_fw_download routines. + */ +static inline int +mptctl_test_address(MptSge_t *sge) +{ +#ifdef __ia64__ + if ((sge->Address.Low) || (sge->Address.High)) + return 1; + else + return 0; +#else + return sge->Address; +#endif +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** @@ -160,7 +210,7 @@ * @ioc: Pointer to MPT adapter * @nonblock: boolean, non-zero if O_NONBLOCK is set * - * All of the mptctl commands can potentially sleep, which is illegal + * All of the ioctl commands can potentially sleep, which is illegal * with a spinlock held, thus we perform mutual exclusion here. * * Returns negative errno on error, or zero for success. 
@@ -168,16 +218,27 @@ static inline int mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) { - dprintk((KERN_INFO MYNAM "::mpt_syscall_down(%p,%d) called\n", ioc, nonblock)); + int rc = 0; + dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down(%p,%d) called\n", ioc, nonblock)); +#if defined(__sparc__) && defined(__sparc_v9__) /*{*/ + if (!nonblock) { + if (down_interruptible(&mptctl_syscall_sem_ioc[ioc->id])) + rc = -ERESTARTSYS; + } else { + rc = -EPERM; + } +#else if (nonblock) { if (down_trylock(&mptctl_syscall_sem_ioc[ioc->id])) - return -EAGAIN; + rc = -EAGAIN; } else { if (down_interruptible(&mptctl_syscall_sem_ioc[ioc->id])) - return -ERESTARTSYS; + rc = -ERESTARTSYS; } - return 0; +#endif + dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down return %d\n", rc)); + return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -190,18 +251,150 @@ static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - u8 targ; + char *sense_data; + int sz, req_index; + u16 iocStatus; + u8 cmd; + + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_reply()!\n", ioc->name)); + if (req) + cmd = req->u.hdr.Function; + else + return 1; - //dprintk((KERN_DEBUG MYNAM ": Got mptctl_reply()!\n")); + if (ioc->ioctl) { + /* If timer is not running, then an error occurred. + * A timeout will call the reset routine to reload the messaging + * queues. + * Main callback will free message and reply frames. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE) { + /* Delete this timer + */ + del_timer (&ioc->ioctl->timer); + ioc->ioctl->status &= ~MPT_IOCTL_STATUS_TIMER_ACTIVE; + + /* Set the overall status byte. Good if: + * IOC status is good OR if no reply and a SCSI IO request + */ + if (reply) { + /* Copy the reply frame (which much exist + * for non-SCSI I/O) to the IOC structure. 
+ */ + dctlprintk((MYIOC_s_INFO_FMT ": Copying Reply Frame @%p to IOC!\n", + ioc->name, reply)); + memcpy(ioc->ioctl->ReplyFrame, reply, + MIN(ioc->reply_sz, 4*reply->u.reply.MsgLength)); + ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID; + + /* Set the command status to GOOD if IOC Status is GOOD + * OR if SCSI I/O cmd and data underrun or recovered error. + */ + iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK; + if (iocStatus == MPI_IOCSTATUS_SUCCESS) + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + + if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) || + (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) { + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + } + } + + /* Copy the sense data - if present + */ + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && + (reply->u.sreply.SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)){ + + sz = req->u.scsireq.SenseBufferLength; + req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); + sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); + memcpy(ioc->ioctl->sense, sense_data, sz); + ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; + } + } else if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + } - if (req && req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) { - targ = req->u.scsireq.TargetID & myMAX_T_MASK; - DevIosCount[targ][0]--; - } else if (reply && req && req->u.hdr.Function == MPI_FUNCTION_FW_DOWNLOAD) { - // NOTE: Expects/requires non-Turbo reply! 
- dprintk((KERN_INFO MYNAM ": Caching MPI_FUNCTION_FW_DOWNLOAD reply!\n")); - memcpy(fwReplyBuffer, reply, MIN(sizeof(fwReplyBuffer), 4*reply->u.reply.MsgLength)); - ReplyMsg = (pMPIDefaultReply_t) fwReplyBuffer; + /* We are done, issue wake up + */ + ioc->ioctl->wait_done = 1; + wake_up (&mptctl_wait); + } else if (reply && cmd == MPI_FUNCTION_FW_DOWNLOAD) { + /* Two paths to FW DOWNLOAD! */ + // NOTE: Expects/requires non-Turbo reply! + dctlprintk((MYIOC_s_INFO_FMT ":Caching MPI_FUNCTION_FW_DOWNLOAD reply!\n", + ioc->name)); + memcpy(fwReplyBuffer, reply, MIN(sizeof(fwReplyBuffer), 4*reply->u.reply.MsgLength)); + ReplyMsg = (pMPIDefaultReply_t) fwReplyBuffer; + } + } + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_timer_expired + * + * Call back for timer process. Used only for ioctl functionality. + * + */ +static void mptctl_timer_expired (unsigned long data) +{ + MPT_IOCTL *ioctl = (MPT_IOCTL *) data; + + dctlprintk((KERN_NOTICE MYNAM ": Timer Expired! Host %d\n", + ioctl->ioc->id)); + + /* Issue a reset for this device. + * The IOC is not responding. + */ + mpt_HardResetHandler(ioctl->ioc, NO_SLEEP); + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_ioc_reset + * + * Clean-up functionality. Used only if there has been a + * reload of the FW due. + * + */ +static int +mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + MPT_IOCTL *ioctl = ioc->ioctl; + dctlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to IOCTL driver!\n", + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + + if (reset_phase == MPT_IOC_PRE_RESET){ + + /* Someone has called the reset handler to + * do a hard reset. No more replies from the FW. + * Delete the timer. + */ + if (ioctl && (ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE)){ + + /* Delete this timer + */ + del_timer(&ioctl->timer); + } + + } else { + /* Set the status and continue IOCTL + * processing. 
All memory will be free'd + * by originating thread after wake_up is + * called. + */ + if (ioctl && (ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE)){ + ioctl->status = MPT_IOCTL_STATUS_DID_TIMEOUT; + + /* Wake up the calling process + */ + ioctl->wait_done = 1; + wake_up(&mptctl_wait); + } } return 1; @@ -209,7 +402,7 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * struct file_operations functionality. + * struct file_operations functionality. * Members: * llseek, write, read, ioctl, open, release */ @@ -235,63 +428,93 @@ static ssize_t mptctl_read(struct file *file, char *buf, size_t count, loff_t *ptr) { + printk(KERN_ERR MYNAM ": ioctl READ not yet supported\n"); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * MPT ioctl handler + * cmd - specify the particular IOCTL command to be issued + * arg - data specific to the command. Must not be null. */ static int -mpt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +mptctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - struct mpt_ioctl_sanity *usanity = (struct mpt_ioctl_sanity *) arg; - struct mpt_ioctl_sanity ksanity; + mpt_ioctl_header *uhdr = (mpt_ioctl_header *) arg; + mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; - dprintk((KERN_INFO MYNAM "::mpt_ioctl() called\n")); + dctlprintk(("mptctl_ioctl() called\n")); - if (copy_from_user(&ksanity, usanity, sizeof(ksanity))) { - printk(KERN_ERR "%s::mpt_ioctl() @%d - " - "Unable to copy mpt_ioctl_sanity data @ %p\n", - __FILE__, __LINE__, (void*)usanity); + if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { + printk(KERN_ERR "%s::mptctl_ioctl() @%d - " + "Unable to copy mpt_ioctl_header data @ %p\n", + __FILE__, __LINE__, (void*)uhdr); return -EFAULT; } ret = -ENXIO; /* (-6) No such device or address */ - /* Verify intended 
MPT adapter */ - iocnumX = ksanity.iocnum & 0xFF; + /* Test for Compaq-specific IOCTL's. + */ + if ((cmd == CPQFCTS_GETPCIINFO) || (cmd == CPQFCTS_CTLR_STATUS) || + (cmd == CPQFCTS_GETDRIVER) || (cmd == CPQFCTS_SCSI_PASSTHRU) || + (cmd == CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS)) + return mptctl_compaq_ioctl(file, cmd, arg); + + /* Verify intended MPT adapter - set iocnum and the adapter + * pointer (iocp) + */ + iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { - printk(KERN_ERR "%s::mpt_ioctl() @%d - ioc%d not found!\n", + printk(KERN_ERR "%s::mptctl_ioctl() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnumX); return -ENODEV; } + /* Handle those commands that are just returning + * information stored in the driver. + * These commands should never time out and are unaffected + * by TM and FW reloads. + */ + if (cmd == MPTIOCINFO) { + return mptctl_getiocinfo(arg); + } else if (cmd == MPTTARGETINFO) { + return mptctl_gettargetinfo(arg); + } else if (cmd == MPTTEST) { + return mptctl_readtest(arg); + } else if (cmd == MPTEVENTQUERY) { + return mptctl_eventquery(arg); + } else if (cmd == MPTEVENTENABLE) { + return mptctl_eventenable(arg); + } else if (cmd == MPTEVENTREPORT) { + return mptctl_eventreport(arg); + } else if (cmd == MPTFWREPLACE) { + return mptctl_replace_fw(arg); + } + + /* All of these commands require an interrupt or + * are unknown/illegal. 
+ */ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; - dprintk((KERN_INFO MYNAM "::mpt_ioctl() - Using %s\n", iocp->name)); + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_ioctl()\n", iocp->name)); switch(cmd) { - case MPTRWPERF: - ret = mpt_ioctl_rwperf(arg); - break; - case MPTRWPERF_CHK: - ret = mpt_ioctl_rwperf_status(arg); - break; - case MPTRWPERF_RESET: - ret = mpt_ioctl_rwperf_reset(arg); - break; case MPTFWDOWNLOAD: - ret = mpt_ioctl_fw_download(arg); + ret = mptctl_fw_download(arg); + break; + case MPTCOMMAND: + ret = mptctl_mpt_command(arg); break; - case MPTSCSICMD: - ret = mpt_ioctl_scsi_cmd(arg); + case MPTHARDRESET: + ret = mptctl_do_reset(arg); break; default: ret = -EINVAL; @@ -302,6 +525,36 @@ return ret; } +static int mptctl_do_reset(unsigned long arg) +{ + struct mpt_ioctl_diag_reset *urinfo = (struct mpt_ioctl_diag_reset *) arg; + struct mpt_ioctl_diag_reset krinfo; + MPT_ADAPTER *iocp; + + dctlprintk((KERN_INFO "mptctl_do_reset called.\n")); + + if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { + printk(KERN_ERR "%s@%d::mptctl_do_reset - " + "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", + __FILE__, __LINE__, (void*)urinfo); + return -EFAULT; + } + + if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { + printk(KERN_ERR "%s@%d::mptctl_do_reset - ioc%d not found!\n", + __FILE__, __LINE__, krinfo.hdr.iocnum); + return -ENXIO; /* (-6) No such device or address */ + } + + if (mpt_HardResetHandler(iocp, NO_SLEEP) != 0) { + printk (KERN_ERR "%s@%d::mptctl_do_reset - reset failed.\n", + __FILE__, __LINE__); + return -1; + } + + return 0; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptctl_open(struct inode *inode, struct file *file) { @@ -318,13 +571,29 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * MPT FW download function. Cast the arg into the mpt_fw_xfer structure. 
+ * This structure contains: iocnum, firmware length (bytes), + * pointer to user space memory where the fw image is stored. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status + */ static int -mpt_ioctl_fw_download(unsigned long arg) +mptctl_fw_download(unsigned long arg) { struct mpt_fw_xfer *ufwdl = (struct mpt_fw_xfer *) arg; struct mpt_fw_xfer kfwdl; - dprintk((KERN_INFO "mpt_ioctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc + dctlprintk((KERN_INFO "mptctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) { printk(KERN_ERR "%s@%d::_ioctl_fwdl - " "Unable to copy mpt_fw_xfer struct @ %p\n", @@ -332,44 +601,52 @@ return -EFAULT; } - return mpt_ioctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * MPT FW Download + * FW Download engine. + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status */ static int -mpt_ioctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen) +mptctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; MPT_ADAPTER *iocp; -// char *fwbuf; -// dma_addr_t fwbuf_dma; - FWDownloadTCSGE_t *fwVoodoo; -// SGEAllUnion_t *fwSgl; + FWDownloadTCSGE_t *ptsge; + MptSge_t *sgl; + MptSge_t *sgOut, *sgIn; + struct buflist *buflist; + struct buflist *bl; + dma_addr_t sgl_dma; int ret; - - SGESimple32_t *sgl; - SGESimple32_t *sgOut, *sgIn; - dma_addr_t sgl_dma; - struct buflist *buflist = NULL; - struct buflist *bl = NULL; - int numfrags = 0; - int maxfrags; - int n = 0; - u32 sgdir; - u32 nib; - int fw_bytes_copied = 0; - u16 iocstat; - int i; - - dprintk((KERN_INFO "mpt_ioctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); - - dprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); - dprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); - dprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); + int numfrags = 0; + int maxfrags; + int n = 0; + u32 sgdir; + u32 nib; + int fw_bytes_copied = 0; + int i; + int cntdn; + int sge_offset = 0; + u16 iocstat; + + dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); + + dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); + dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); + dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { printk("%s@%d::_ioctl_fwdl - ioc%d not found!\n", @@ -377,11 +654,13 @@ return -ENXIO; /* (-6) No such device or address */ } + /* Valid device. Get a message frame and construct the FW download message. 
+ */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) return -EAGAIN; dlmsg = (FWDownload_t*) mf; - fwVoodoo = (FWDownloadTCSGE_t *) &dlmsg->SGL; - sgOut = (SGESimple32_t *) (fwVoodoo + 1); + ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; + sgOut = (MptSge_t *) (ptsge + 1); /* * Construct f/w download request @@ -393,27 +672,36 @@ dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; dlmsg->MsgFlags = 0; - fwVoodoo->Reserved = 0; - fwVoodoo->ContextSize = 0; - fwVoodoo->DetailsLength = 12; - fwVoodoo->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; - fwVoodoo->Reserved1 = 0; - fwVoodoo->ImageOffset = 0; - fwVoodoo->ImageSize = cpu_to_le32(fwlen); + /* Set up the Transaction SGE. + */ + ptsge->Reserved = 0; + ptsge->ContextSize = 0; + ptsge->DetailsLength = 12; + ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; + ptsge->Reserved_0100_Checksum = 0; + ptsge->ImageOffset = 0; + ptsge->ImageSize = cpu_to_le32(fwlen); + + /* Add the SGL + */ /* * Need to kmalloc area(s) for holding firmware image bytes. * But we need to do it piece meal, using a proper * scatter gather list (with 128kB MAX hunks). - * + * * A practical limit here might be # of sg hunks that fit into * a single IOC request frame; 12 or 8 (see below), so: * For FC9xx: 12 x 128kB == 1.5 mB (max) * For C1030: 8 x 128kB == 1 mB (max) * We could support chaining, but things get ugly(ier:) + * + * Set the sge_offset to the start of the sgl (bytes). */ sgdir = 0x04000000; /* IOC will READ from sys mem */ - if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) + sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); + if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, + &numfrags, &buflist, &sgl_dma, iocp)) == NULL) return -ENOMEM; /* @@ -421,16 +709,19 @@ * for FC9xx f/w image, but calculate max number of sge hunks * we can fit into a request frame, and limit ourselves to that. 
* (currently no chain support) - * For FC9xx: (128-12-16)/8 = 12.5 = 12 - * For C1030: (96-12-16)/8 = 8.5 = 8 + * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE + * Request maxfrags + * 128 12 + * 96 8 + * 64 4 */ - maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / sizeof(SGESimple32_t); + maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / sizeof(MptSge_t); if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; } - dprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); + dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); /* * Parse SG list, copying sgl itself, @@ -440,11 +731,17 @@ sgIn = sgl; bl = buflist; for (i=0; i < numfrags; i++) { - nib = (le32_to_cpu(sgIn->FlagsLength) & 0xF0000000) >> 28; - /* skip ignore/chain. */ + + /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE + * Skip everything but Simple. If simple, copy from + * user space into kernel space. + * Note: we should not have anything but Simple as + * Chain SGE are illegal. 
+ */ + nib = (le32_to_cpu(sgIn->FlagsLength) & 0x30000000) >> 28; if (nib == 0 || nib == 3) { ; - } else if (sgIn->Address) { + } else if (mptctl_test_address(sgIn)) { *sgOut = *sgIn; n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { @@ -479,26 +776,24 @@ /* * Wait until the reply has been received */ - { - int foo = 0; - - while (ReplyMsg == NULL) { - if (!(foo%1000000)) { - dprintk((KERN_INFO "DbG::_do_fwdl: " - "In ReplyMsg loop - iteration %d\n", - foo)); //tc - } + for (cntdn=HZ*60, i=1; ReplyMsg == NULL; cntdn--, i++) { + if (!cntdn) { ret = -ETIME; - if (++foo > 60000000) - goto fwdl_out; - mb(); - schedule(); - barrier(); + goto fwdl_out; + } + + if (!(i%HZ)) { + dctlprintk((KERN_INFO "DbG::_do_fwdl: " + "In ReplyMsg loop - iteration %d\n", + i)); } + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); } if (sgl) - kfree_sgl(sgl, sgl_dma, buflist, iocp); + kfree_sgl(sgl, sgl_dma, buflist, iocp); iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { @@ -528,32 +823,46 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * NEW rwperf (read/write performance) stuff starts here... + * SGE Allocation routine + * + * Inputs: bytes - number of bytes to be transferred + * sgdir - data direction + * sge_offset - offset (in bytes) from the start of the request + * frame to the first SGE + * ioc - pointer to the mptadapter + * Outputs: frags - number of scatter gather elements + * blp - point to the buflist pointer + * sglbuf_dma - pointer to the (dma) sgl + * Returns: Null if failes + * pointer to the (virtual) sgl if successful. 
*/ - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static SGESimple32_t * -kbuf_alloc_2_sgl(int bytes, u32 sgdir, int *frags, +static MptSge_t * +kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) { - SGESimple32_t *sglbuf = NULL; - struct buflist *buflist = NULL; + MptSge_t *sglbuf = NULL; /* pointer to array of SGE + * and chain buffers */ + struct buflist *buflist = NULL; /* kernel routine */ + MptSge_t *sgl; + MptChain_t *last_chain = NULL; int numfrags = 0; int fragcnt = 0; int alloc_sz = MIN(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg! int bytes_allocd = 0; int this_alloc; - SGESimple32_t *sgl; - u32 pa; // phys addr - SGEChain32_t *last_chain = NULL; - SGEChain32_t *old_chain = NULL; + dma_addr_t pa; // phys addr int chaincnt = 0; int i, buflist_ent; int sg_spill = MAX_FRAGS_SPILL1; int dir; + /* initialization */ *frags = 0; *blp = NULL; + + /* Allocate and initialize an array of kernel + * structures for the SG elements. + */ i = MAX_SGL_BYTES / 8; buflist = kmalloc(i, GFP_USER); if (buflist == NULL) @@ -561,6 +870,11 @@ memset(buflist, 0, i); buflist_ent = 0; + /* Allocate a single block of memory to store the sg elements and + * the chain buffers. The calling routine is responsible for + * copying the data in this array into the correct place in the + * request and chain buffers. + */ sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma); if (sglbuf == NULL) goto free_and_fail; @@ -570,7 +884,15 @@ else dir = PCI_DMA_FROMDEVICE; + /* At start: + * sgl = sglbuf = point to beginning of sg buffer + * buflist_ent = 0 = first kernel structure + * sg_spill = number of SGE that can be written before the first + * chain element. 
+ * + */ sgl = sglbuf; + sg_spill = ((ioc->req_sz - sge_offset)/ sizeof(MptSge_t)) - 1; while (bytes_allocd < bytes) { this_alloc = MIN(alloc_sz, bytes-bytes_allocd); buflist[buflist_ent].len = this_alloc; @@ -595,7 +917,7 @@ /* Write one SIMPLE sge */ sgl->FlagsLength = cpu_to_le32(0x10000000|sgdir|this_alloc); dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); - sgl->Address = cpu_to_le32(dma_addr); + cpu_to_leXX(dma_addr, sgl->Address); fragcnt++; numfrags++; @@ -610,24 +932,43 @@ if (fragcnt == sg_spill) { dma_addr_t chain_link; - if (last_chain != NULL) - last_chain->NextChainOffset = 0x1E; - - fragcnt = 0; - sg_spill = MAX_FRAGS_SPILL2; + /* If there is a chain element, set the offset + * (in 32 bit words) to the next chain element. + * fragcnt = # sge = 8 bytes = 2 words + * + * Set the length of the chain element (bytes) + * This includes the size of the next chain element. + * + * We are now done with last_chain and the previous + * buffer. + */ + if (last_chain != NULL) { + last_chain->NextChainOffset = fragcnt * 2; + last_chain->Length = cpu_to_le16((fragcnt+1) * 8); + } - /* fixup previous SIMPLE sge */ + /* Finish the current buffer: + * - add the LE bit to last sge + * - add the chain element + */ sgl[-1].FlagsLength |= cpu_to_le32(0x80000000); chain_link = (*sglbuf_dma) + ((u8 *)(sgl+1) - (u8 *)sglbuf); /* Write one CHAIN sge */ - sgl->FlagsLength = cpu_to_le32(0x30000080); - sgl->Address = cpu_to_le32(chain_link); +// sgl->FlagsLength = cpu_to_le32(0x30000080); + sgl->FlagsLength = cpu_to_le32(0x30000000); + cpu_to_leXX(chain_link, sgl->Address); + + /* Reset everything for the next SGE series, + * save a ptr to the chain element in last_chain + */ + fragcnt = 0; +// sg_spill = MAX_FRAGS_SPILL2; + sg_spill = (ioc->req_sz / sizeof(MptSge_t)) - 1; - old_chain = last_chain; - last_chain = (SGEChain32_t*)sgl; + last_chain = (MptChain_t*)sgl; chaincnt++; numfrags++; sgl++; @@ -647,18 +988,19 @@ /* Last sge fixup: set 
LE+eol+eob bits */ sgl[-1].FlagsLength |= cpu_to_le32(0xC1000000); - /* Chain fixup needed? */ - if (last_chain != NULL && fragcnt < 16) + /* Chain fixup needed? */ /* SteveR CHECKME!!! */ +// if (last_chain != NULL && fragcnt < 16) + if (last_chain != NULL) last_chain->Length = cpu_to_le16(fragcnt * 8); *frags = numfrags; *blp = buflist; - dprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " + dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " "%d SG frags generated! (%d CHAIN%s)\n", numfrags, chaincnt, chaincnt>1?"s":"")); - dprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " + dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " "last (big) alloc_sz=%d\n", alloc_sz)); @@ -676,7 +1018,7 @@ if ((le32_to_cpu(sglbuf[i].FlagsLength) >> 24) == 0x30) continue; - dma_addr = le32_to_cpu(sglbuf[i].Address); + leXX_to_cpu(dma_addr, sglbuf[i].Address); kptr = buflist[i].kptr; len = buflist[i].len; @@ -689,16 +1031,19 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Routine to free the SGL elements. + */ static void -kfree_sgl(SGESimple32_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) +kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) { - SGESimple32_t *sg = sgl; + MptSge_t *sg = sgl; struct buflist *bl = buflist; u32 nib; int dir; int n = 0; - if (le32_to_cpu(sg->FlagsLength) & 0x04000000) + if ((le32_to_cpu(sg->FlagsLength) & 0x04000000)) dir = PCI_DMA_TODEVICE; else dir = PCI_DMA_FROMDEVICE; @@ -708,12 +1053,12 @@ /* skip ignore/chain. */ if (nib == 0 || nib == 3) { ; - } else if (sg->Address) { + } else if (mptctl_test_address(sg)) { dma_addr_t dma_addr; void *kptr; int len; - dma_addr = le32_to_cpu(sg->Address); + leXX_to_cpu(dma_addr, sg->Address); kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); @@ -726,12 +1071,12 @@ } /* we're at eob! 
*/ - if (sg->Address) { + if (mptctl_test_address(sg)) { dma_addr_t dma_addr; void *kptr; int len; - dma_addr = le32_to_cpu(sg->Address); + leXX_to_cpu(dma_addr, sg->Address); kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); @@ -741,363 +1086,1657 @@ pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); kfree(buflist); - dprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n)); + dctlprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n)); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_getiocinfo - Query the host adapter for IOC information. + * @arg: User space argument + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ static int -mpt_ioctl_rwperf_init(struct mpt_raw_r_w *dest, unsigned long src, - char *caller, MPT_ADAPTER **iocpp) +mptctl_getiocinfo (unsigned long arg) { - char *myname = "_rwperf_init()"; - int ioc; + struct mpt_ioctl_iocinfo *uarg = (struct mpt_ioctl_iocinfo *) arg; + struct mpt_ioctl_iocinfo karg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + struct Scsi_Host *sh; + MPT_SCSI_HOST *hd; + int iocnum; + int numDevices = 0; + unsigned int max_id; + int ii; + int port; + u8 revision; + + dctlprintk((": mptctl_getiocinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_iocinfo))) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Unable to read in mpt_ioctl_iocinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - /* get copy of structure passed from user space */ - if (copy_from_user(dest, (void*)src, sizeof(*dest))) { - printk(KERN_ERR MYNAM "::%s() @%d - Can't copy mpt_raw_r_w data @ %p\n", - myname, __LINE__, (void*)src); - return -EFAULT; /* (-14) Bad address */ - } else { - dprintk((KERN_INFO MYNAM "-perf: PerfInfo.{ioc,targ,qd,iters,nblks}" - ": %d %d %d %d %d\n", - dest->iocnum, dest->target, - 
(int)dest->qdepth, dest->iters, dest->nblks )); - dprintk((KERN_INFO MYNAM "-perf: PerfInfo.{cache,skip,range,rdwr,seqran}" - ": %d %d %d %d %d\n", - dest->cache_sz, dest->skip, dest->range, - dest->rdwr, dest->seqran )); - - /* Get the MPT adapter id. */ - if ((ioc = mpt_verify_adapter(dest->iocnum, iocpp)) < 0) { - printk(KERN_ERR MYNAM "::%s() @%d - ioc%d not found!\n", - myname, __LINE__, dest->iocnum); - return -ENXIO; /* (-6) No such device or address */ - } else { - dprintk((MYNAM "-perf: %s using mpt/ioc%x, target %02xh\n", - caller, dest->iocnum, dest->target)); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* Verify the data transfer size is correct. + * Ignore the port setting. + */ + if (karg.hdr.maxDataSize != sizeof(struct mpt_ioctl_iocinfo)) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Structure size mismatch. Command not completed.\n", + __FILE__, __LINE__); + return -EFAULT; + } + + /* Fill in the data and return the structure to the calling + * program + */ + if (ioc->chip_type == C1030) + karg.adapterType = MPT_IOCTL_INTERFACE_SCSI; + else + karg.adapterType = MPT_IOCTL_INTERFACE_FC; + + port = karg.hdr.port; + + karg.port = port; + pdev = (struct pci_dev *) ioc->pcidev; + + karg.pciId = pdev->device; + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); + karg.hwRev = revision; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + karg.subSystemDevice = pdev->subsystem_device; + karg.subSystemVendor = pdev->subsystem_vendor; +#endif + + /* Get number of devices + */ + if ( (sh = ioc->sh) != NULL) { + + /* sh->max_id = maximum target ID + 1 + */ + max_id = sh->max_id - 1; + hd = (MPT_SCSI_HOST *) sh->hostdata; + + /* Check all of the target structures and + * keep a counter. 
+ */ + if (hd && hd->Targets) { + for (ii = 0; ii <= max_id; ii++) { + if (hd->Targets[ii]) + numDevices++; + } + } + } + karg.numDevices = numDevices; + + /* Set the BIOS and FW Version + */ + karg.FWVersion = ioc->facts.FWVersion.Word; + karg.BIOSVersion = ioc->biosVersion; + + /* Set the Version Strings. + */ + strncpy (karg.driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); + + karg.busChangeEvent = 0; + karg.hostId = ioc->pfacts[port].PortSCSIID; + karg.rsvd[0] = karg.rsvd[1] = 0; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(struct mpt_ioctl_iocinfo))) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_gettargetinfo - Query the host adapter for target information. + * @arg: User space argument + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_gettargetinfo (unsigned long arg) +{ + struct mpt_ioctl_targetinfo *uarg = (struct mpt_ioctl_targetinfo *) arg; + struct mpt_ioctl_targetinfo karg; + MPT_ADAPTER *ioc; + struct Scsi_Host *sh; + MPT_SCSI_HOST *hd; + char *pmem; + int *pdata; + int iocnum; + int numDevices = 0; + unsigned int max_id; + int ii, jj, lun; + int maxWordsLeft; + int numBytes; + u8 port; + + dctlprintk(("mptctl_gettargetinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* Get the port number and set the maximum number of bytes + * in the returned structure. + * Ignore the port setting. + */ + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxWordsLeft = numBytes/sizeof(int); + port = karg.hdr.port; + + if (maxWordsLeft <= 0) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n", + __FILE__, __LINE__); + return -ENOMEM; + } + + /* Fill in the data and return the structure to the calling + * program + */ + + /* struct mpt_ioctl_targetinfo does not contain sufficient space + * for the target structures so when the IOCTL is called, there is + * not sufficient stack space for the structure. Allocate memory, + * populate the memory, copy back to the user, then free memory. 
+ * targetInfo format: + * bits 31-24: reserved + * 23-16: LUN + * 15- 8: Bus Number + * 7- 0: Target ID + */ + pmem = kmalloc(numBytes, GFP_KERNEL); + if (pmem == NULL) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n", + __FILE__, __LINE__); + return -ENOMEM; + } + memset(pmem, 0, numBytes); + pdata = (int *) pmem; + + /* Get number of devices + */ + if ( (sh = ioc->sh) != NULL) { + + max_id = sh->max_id - 1; + hd = (MPT_SCSI_HOST *) sh->hostdata; + + /* Check all of the target structures. + * Save the Id and increment the counter, + * if ptr non-null. + * sh->max_id = maximum target ID + 1 + */ + if (hd && hd->Targets) { + ii = 0; + while (ii <= max_id) { + if (hd->Targets[ii]) { + for (jj = 0; jj <= MPT_LAST_LUN; jj++) { + lun = (1 << jj); + if (hd->Targets[ii]->luns & lun) { + numDevices++; + *pdata = (jj << 16) | ii; + --maxWordsLeft; + + pdata++; + + if (maxWordsLeft <= 0) { + break; + } + } + } + } + ii++; + } } } + karg.numDevices = numDevices; + + /* Copy part of the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(struct mpt_ioctl_targetinfo))) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + kfree(pmem); + return -EFAULT; + } + + /* Copy the remaining data from kernel memory to user memory + */ + if (copy_to_user((char *) uarg->targetInfo, pmem, numBytes)) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)pdata); + kfree(pmem); + return -EFAULT; + } + + kfree(pmem); - return ioc; + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL Test function. + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_readtest (unsigned long arg) +{ + struct mpt_ioctl_test *uarg = (struct mpt_ioctl_test *) arg; + struct mpt_ioctl_test karg; + MPT_ADAPTER *ioc; + int iocnum; + + dctlprintk(("mptctl_readtest called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { + printk(KERN_ERR "%s@%d::mptctl_readtest - " + "Unable to read in mpt_ioctl_test struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_readtest() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } -/* Treat first N blocks of disk as sacred! */ -#define SACRED_BLOCKS 100 - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static int -mpt_ioctl_rwperf(unsigned long arg) -{ - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! */ - u8 target, targetM; - u8 lun, lunM; - u8 scsiop; - int qdepth; - int iters; - int cache_sz; - u32 xferbytes; - u32 scsidir; - u32 qtag; - u32 scsictl; - u32 sgdir; - u32 blkno; - u32 sbphys; - SGESimple32_t *sgl; - dma_addr_t sgl_dma; - struct buflist *buflist; - SGESimple32_t *sgOut, *sgIn; - int numfrags; - u32 *msg; - int i; - int ioc; - MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; - int sgfragcpycnt; - int blklo, blkhi; - u8 nextchainoffset; - u8 *SenseBuf; - dma_addr_t SenseBufDMA; - char *myname = "_rwperf()"; - - dprintk((KERN_INFO "%s - starting...\n", myname)); - - /* Validate target device */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; - - /* Allocate DMA'able memory for the sense buffer. 
*/ - SenseBuf = pci_alloc_consistent(iocp->pcidev, 256, &SenseBufDMA); - - /* set perf parameters from input */ - target = kPerfInfo.target & 0x0FF; - targetM = target & myMAX_T_MASK; - lun = kPerfInfo.lun & 0x1F; // LUN=31 max - lunM = lun & myMAX_L_MASK; - qdepth = kPerfInfo.qdepth; - iters = kPerfInfo.iters; - xferbytes = ((u32)kPerfInfo.nblks)<<9; - - DevInUse[targetM][lunM] = 1; - DevIosCount[targetM][lunM] = 0; - - cache_sz = kPerfInfo.cache_sz * 1024; // CacheSz in kB! - - /* ToDo: */ - /* get capacity (?) */ - - - // pre-build, one time, everything we can for speed in the loops below... - - scsiop = 0x28; // default to SCSI READ! - scsidir = MPI_SCSIIO_CONTROL_READ; // DATA IN (host<--ioc<--dev) - // 02000000 - qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; // 00000000 - - if (xferbytes == 0) { - // Do 0-byte READ!!! - // IMPORTANT! Need to set no SCSI DIR for this! - scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; - } - - scsictl = scsidir | qtag; - - /* - * Set sgdir for DMA transfer. - */ -// sgdir = 0x04000000; // SCSI WRITE - sgdir = 0x00000000; // SCSI READ - - if ((sgl = kbuf_alloc_2_sgl(MAX(512,xferbytes), sgdir, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) - return -ENOMEM; - - sgfragcpycnt = MIN(10,numfrags); - nextchainoffset = 0; - if (numfrags > 10) - nextchainoffset = 0x1E; - - sbphys = SenseBufDMA; - - rwperf_reset = 0; - -// do { // target-loop - - blkno = SACRED_BLOCKS; // Treat first N blocks as sacred! - // FIXME! Skip option - blklo = blkno; - blkhi = blkno; - - do { // inner-loop - - while ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { - mb(); - schedule(); - barrier(); - } - msg = (u32*)mf; - - /* Start piecing the SCSIIORequest together */ - msg[0] = 0x00000000 | nextchainoffset<<16 | target; - msg[1] = 0x0000FF0A; // 255 sense bytes, 10-byte CDB! - msg[3] = lun << 8; - msg[4] = 0; - msg[5] = scsictl; - - // 16 bytes of CDB @ msg[6,7,8,9] are below... 
- - msg[6] = ( ((blkno & 0xFF000000) >> 8) - | ((blkno & 0x00FF0000) << 8) - | scsiop ); - msg[7] = ( (((u32)kPerfInfo.nblks & 0x0000FF00) << 16) - | ((blkno & 0x000000FF) << 8) - | ((blkno & 0x0000FF00) >> 8) ); - msg[8] = (kPerfInfo.nblks & 0x00FF); - msg[9] = 0; - - msg[10] = xferbytes; - -// msg[11] = 0xD0000100; -// msg[12] = sbphys; -// msg[13] = 0; - msg[11] = sbphys; - - // Copy the SGL... - if (xferbytes) { - sgOut = (SGESimple32_t*)&msg[12]; - sgIn = sgl; - for (i=0; i < sgfragcpycnt; i++) - *sgOut++ = *sgIn++; - } - - // fubar! QueueDepth issue!!! - while ( !rwperf_reset - && (DevIosCount[targetM][lunM] >= MIN(qdepth,64)) ) - { - mb(); - schedule(); - barrier(); - } - -// blkno += kPerfInfo.nblks; -// EXP Stuff! -// Try optimizing to certain cache size for the target! -// by keeping blkno within cache range if at all possible -#if 0 - if ( cache_sz - && ((2 * kPerfInfo.nblks) <= (cache_sz>>9)) - && ((blkno + kPerfInfo.nblks) > ((cache_sz>>9) + SACRED_BLOCKS)) ) - blkno = SACRED_BLOCKS; - else - blkno += kPerfInfo.nblks; + /* Fill in the data and return the structure to the calling + * program + */ + +#ifdef MFCNT + karg.chip_type = ioc->mfcnt; +#else + karg.chip_type = ioc->chip_type; #endif -// Ok, cheat! 
- if (cache_sz && ((blkno + kPerfInfo.nblks) > ((cache_sz>>9) + SACRED_BLOCKS)) ) - blkno = SACRED_BLOCKS; - else - blkno += kPerfInfo.nblks; + strncpy (karg.name, ioc->name, MPT_MAX_NAME); + strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH); - if (blkno > blkhi) - blkhi = blkno; + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, sizeof(struct mpt_ioctl_test))) { + printk(KERN_ERR "%s@%d::mptctl_readtest - " + "Unable to write out mpt_ioctl_test struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - DevIosCount[targetM][lunM]++; + return 0; +} - /* - * Finally, post the request - */ - mpt_put_msg_frame(mptctl_id, ioc, mf); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_eventquery - Query the host adapter for the event types + * that are being logged. + * @arg: User space argument + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_eventquery (unsigned long arg) +{ + struct mpt_ioctl_eventquery *uarg = (struct mpt_ioctl_eventquery *) arg; + struct mpt_ioctl_eventquery karg; + MPT_ADAPTER *ioc; + int iocnum; + dctlprintk(("mptctl_eventquery called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { + printk(KERN_ERR "%s@%d::mptctl_eventquery - " + "Unable to read in mpt_ioctl_eventquery struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - /* let linux breath! 
*/ - mb(); - schedule(); - barrier(); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventquery() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } - //dprintk((KERN_DEBUG MYNAM "-perf: inner-loop, cnt=%d\n", iters)); + karg.eventEntries = ioc->eventLogSize; + karg.eventTypes = ioc->eventTypes; - } while ((--iters > 0) && !rwperf_reset); + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { + printk(KERN_ERR "%s@%d::mptctl_eventquery - " + "Unable to write out mpt_ioctl_eventquery struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + return 0; +} - dprintk((KERN_INFO MYNAM "-perf: DbG: blklo=%d, blkhi=%d\n", blklo, blkhi)); - dprintk((KERN_INFO MYNAM "-perf: target-loop, thisTarget=%d\n", target)); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventenable (unsigned long arg) +{ + struct mpt_ioctl_eventenable *uarg = (struct mpt_ioctl_eventenable *) arg; + struct mpt_ioctl_eventenable karg; + MPT_ADAPTER *ioc; + int iocnum; -// // TEMPORARY! -// target = 0; + dctlprintk(("mptctl_eventenable called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { + printk(KERN_ERR "%s@%d::mptctl_eventenable - " + "Unable to read in mpt_ioctl_eventenable struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } -// } while (target); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventenable() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + if (ioc->events == NULL) { + /* Have not yet allocated memory - do so now. 
+ */ + int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); + ioc->events = kmalloc(sz, GFP_KERNEL); + if (ioc->events == NULL) { + printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); + return -ENOMEM; + } + memset(ioc->events, 0, sz); + ioc->alloc_total += sz; - if (DevIosCount[targetM][lunM]) { - dprintk((KERN_INFO " DbG: DevIosCount[%d][%d]=%d\n", - targetM, lunM, DevIosCount[targetM][lunM])); - } + ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE; + ioc->eventContext = 0; + } - while (DevIosCount[targetM][lunM]) { - //dprintk((KERN_DEBUG " DbG: Waiting... DevIosCount[%d][%d]=%d\n", - // targetM, lunM, DevIosCount[targetM][lunM])); - mb(); - schedule(); - barrier(); - } - DevInUse[targetM][lunM] = 0; + /* Update the IOC event logging flag. + */ + ioc->eventTypes = karg.eventTypes; - pci_free_consistent(iocp->pcidev, 256, SenseBuf, SenseBufDMA); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventreport (unsigned long arg) +{ + struct mpt_ioctl_eventreport *uarg = (struct mpt_ioctl_eventreport *) arg; + struct mpt_ioctl_eventreport karg; + MPT_ADAPTER *ioc; + int iocnum; + int numBytes, maxEvents, max; + + dctlprintk(("mptctl_eventreport called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { + printk(KERN_ERR "%s@%d::mptctl_eventreport - " + "Unable to read in mpt_ioctl_eventreport struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventreport() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); - if (sgl) - kfree_sgl(sgl, sgl_dma, buflist, iocp); - dprintk((KERN_INFO " *** done ***\n")); + max = ioc->eventLogSize < maxEvents ? 
ioc->eventLogSize : maxEvents; - return 0; + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if ((max < 1) || !ioc->events) + return -ENODATA; + + /* Copy the data from kernel memory to user memory + */ + numBytes = max * sizeof(MPT_IOCTL_EVENTS); + if (copy_to_user((char *) uarg->eventData, ioc->events, numBytes)) { + printk(KERN_ERR "%s@%d::mptctl_eventreport - " + "Unable to write out mpt_ioctl_eventreport struct @ %p\n", + __FILE__, __LINE__, (void*)ioc->events); + return -EFAULT; + } + + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mpt_ioctl_rwperf_status(unsigned long arg) +mptctl_replace_fw (unsigned long arg) { - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! */ - MPT_ADAPTER *iocp; - int ioc; -// u8 targ; -// u8 lun; - int T, L; - char *myname = "_rwperf_status()"; + struct mpt_ioctl_replace_fw *uarg = (struct mpt_ioctl_replace_fw *) arg; + struct mpt_ioctl_replace_fw karg; + MPT_ADAPTER *ioc; + int iocnum; + u8 *mem = NULL; + dma_addr_t mem_dma; + int oldFwSize, newFwSize; + + dctlprintk(("mptctl_replace_fw called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { + printk(KERN_ERR "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* If not caching FW, return 0 + */ + if ((ioc->FWImage == NULL) && (ioc->alt_ioc) && (ioc->alt_ioc->FWImage == NULL)) { + return 0; + } + + + /* Allocate memory for the new FW image + */ + newFwSize = karg.newImageSize; + mem = pci_alloc_consistent(ioc->pcidev, newFwSize, &mem_dma); + if (mem == NULL) + return -ENOMEM; + + ioc->alloc_total += newFwSize; - 
dprintk((KERN_INFO "%s - starting...\n", myname)); + /* Copy the data from user memory to kernel space + */ + if (copy_from_user(mem, uarg->newImage, newFwSize)) { + printk(KERN_ERR "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw image @ %p\n", + __FILE__, __LINE__, (void*)uarg); + pci_free_consistent(ioc->pcidev, newFwSize, mem, mem_dma); + ioc->alloc_total -= newFwSize; + return -EFAULT; + } + + /* Free the old FW image + */ + oldFwSize = ioc->facts.FWImageSize; + if (ioc->FWImage) { + pci_free_consistent(ioc->pcidev, oldFwSize, ioc->FWImage, ioc->FWImage_dma); + ioc->alloc_total -= oldFwSize; + ioc->FWImage = mem; + ioc->FWImage_dma = mem_dma; + + } else if ((ioc->alt_ioc) && (ioc->alt_ioc->FWImage)) { + pci_free_consistent(ioc->pcidev, oldFwSize, ioc->alt_ioc->FWImage, ioc->alt_ioc->FWImage_dma); + ioc->alloc_total -= oldFwSize; + ioc->alt_ioc->FWImage = mem; + ioc->alt_ioc->FWImage_dma = mem_dma; + } - /* Get a pointer to the MPT adapter. */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; + /* Update IOCFactsReply + */ + ioc->facts.FWImageSize = newFwSize; + if (ioc->alt_ioc) + ioc->alt_ioc->facts.FWImageSize = newFwSize; - /* set perf parameters from input */ -// targ = kPerfInfo.target & 0xFF; -// lun = kPerfInfo.lun & 0x1F; + return 0; +} - for (T=0; T < myMAX_TARGETS; T++) - for (L=0; L < myMAX_LUNS; L++) - if (DevIosCount[T][L]) { - printk(KERN_INFO "%s: ioc%d->00:%02x:%02x" - ", IosCnt=%d\n", - myname, ioc, T, L, DevIosCount[T][L] ); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL MPTCOMMAND function. + * Cast the arg into the mpt_ioctl_mpt_command structure. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. 
+ * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_mpt_command (unsigned long arg) +{ + struct mpt_ioctl_command *uarg = (struct mpt_ioctl_command *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *ioc; + int iocnum; + int rc; + + dctlprintk(("mptctl_command called.\n")); + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) { + printk(KERN_ERR "%s@%d::mptctl_mpt_command - " + "Unable to read in mpt_ioctl_command struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + rc = mptctl_do_mpt_command (karg, (char *) &uarg->MF, 0); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. 
+ * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_do_mpt_command (struct mpt_ioctl_command karg, char *mfPtr, int local) +{ + MPT_ADAPTER *ioc; + MPT_FRAME_HDR *mf = NULL; + MPIHeader_t *hdr; + MptSge_t *psge; + MptSge_t *this_sge = NULL; + MptSge_t *sglbuf = NULL; + struct buflist bufIn; /* data In buffer */ + struct buflist bufOut; /* data Out buffer */ + dma_addr_t sglbuf_dma; + dma_addr_t dma_addr; + int dir; /* PCI data direction */ + int sgSize = 0; /* Num SG elements */ + int this_alloc; + int iocnum, flagsLength; + int sz, rc = 0; + int msgContext; + u16 req_idx; + + dctlprintk(("mptctl_do_mpt_command called.\n")); + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + if (!ioc->ioctl) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "No memory available during driver init.\n", + __FILE__, __LINE__); + return -ENOMEM; + } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_TIMEOUT) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Busy with IOC Reset \n", __FILE__, __LINE__); + return -EBUSY; + } + + /* Verify that the final request frame will not be too large. + */ + sz = karg.dataSgeOffset * 4; + if (karg.dataInSize > 0) + sz += sizeof (MptSge_t); + if (karg.dataOutSize > 0) + sz += sizeof (MptSge_t); + + if ( sz > ioc->req_sz) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Request frame too large (%d) maximum (%d)\n", + __FILE__, __LINE__, sz, ioc->req_sz); + return -EFAULT; + } + + /* Get a free request frame and save the message context. 
+ */ + if ((mf = mpt_get_msg_frame(mptctl_id, ioc->id)) == NULL) + return -EAGAIN; + + hdr = (MPIHeader_t *) mf; + msgContext = le32_to_cpu(hdr->MsgContext); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + + /* Copy the request frame + * Reset the saved message context. + */ + if (local) { + /* Request frame in kernel space + */ + memcpy((char *)mf, (char *) mfPtr, karg.dataSgeOffset * 4); + } else { + /* Request frame in user space + */ + if (copy_from_user((char *)mf, (char *) mfPtr, + karg.dataSgeOffset * 4)){ + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to read MF from mpt_ioctl_command struct @ %p\n", + __FILE__, __LINE__, (void*)mfPtr); + rc = -EFAULT; + goto done_free_mem; + } + } + hdr->MsgContext = cpu_to_le32(msgContext); + + + /* Verify that this request is allowed. + */ + switch (hdr->Function) { + case MPI_FUNCTION_IOC_FACTS: + case MPI_FUNCTION_PORT_FACTS: + case MPI_FUNCTION_CONFIG: + case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND: + case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND: + case MPI_FUNCTION_FW_UPLOAD: + case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + case MPI_FUNCTION_FW_DOWNLOAD: + break; + + case MPI_FUNCTION_SCSI_IO_REQUEST: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + VirtDevice *pTarget = NULL; + MPT_SCSI_HOST *hd = NULL; + int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; + int scsidir = 0; + int target = (int) pScsiReq->TargetID; + int dataSize; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + if ( (hd = (MPT_SCSI_HOST *) ioc->sh->hostdata)) { + if (hd->Targets) + 
pTarget = hd->Targets[target]; } + if (pTarget &&(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) + qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). + */ + if (karg.dataOutSize > 0 ) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } + else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + } else { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + case MPI_FUNCTION_RAID_ACTION: + /* Just add a SGE + */ + break; + + case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + int scsidir = MPI_SCSIIO_CONTROL_READ; + int dataSize; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + /* All commands to physical devices are tagged + */ + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). 
+ */ + if (karg.dataOutSize > 0 ) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } + else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + } else { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + default: + /* + * MPI_FUNCTION_IOC_INIT + * MPI_FUNCTION_PORT_ENABLE + * MPI_FUNCTION_TARGET_CMD_BUFFER_POST + * MPI_FUNCTION_TARGET_ASSIST + * MPI_FUNCTION_TARGET_STATUS_SEND + * MPI_FUNCTION_TARGET_MODE_ABORT + * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET + * MPI_FUNCTION_IO_UNIT_RESET + * MPI_FUNCTION_HANDSHAKE + * MPI_FUNCTION_REPLY_FRAME_REMOVAL + * MPI_FUNCTION_EVENT_NOTIFICATION + * (driver handles event notification) + * MPI_FUNCTION_EVENT_ACK + * MPI_FUNCTION_SCSI_TASK_MGMT + */ + + /* What to do with these??? CHECK ME!!! + MPI_FUNCTION_FC_LINK_SRVC_BUF_POST + MPI_FUNCTION_FC_LINK_SRVC_RSP + MPI_FUNCTION_FC_ABORT + MPI_FUNCTION_FC_PRIMITIVE_SEND + MPI_FUNCTION_LAN_SEND + MPI_FUNCTION_LAN_RECEIVE + MPI_FUNCTION_LAN_RESET + */ + + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Illegal request (function 0x%x) \n", + __FILE__, __LINE__, hdr->Function); + rc = -EFAULT; + goto done_free_mem; + } + + /* Add the SGL ( at most one data in SGE and one data out SGE ) + * In the case of two SGE's - the data out (write) will always + * preceede the data in (read) SGE. psgList is used to free the + * allocated memory. + */ + psge = (MptSge_t *) ( ((int *) mf) + karg.dataSgeOffset); + flagsLength = 0; + + /* bufIn and bufOut are used for user to kernel space transfers + */ + bufIn.kptr = bufOut.kptr = NULL; + bufIn.len = bufOut.len = 0; + + if (karg.dataOutSize > 0 ) + sgSize ++; + + if (karg.dataInSize > 0 ) + sgSize ++; + + if (sgSize > 0) { + + /* Allocate memory for the SGL. 
+ * Used to free kernel memory once + * the MF is freed. + */ + sglbuf = pci_alloc_consistent (ioc->pcidev, + sgSize*sizeof(MptSge_t), &sglbuf_dma); + if (sglbuf == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } + this_sge = sglbuf; + + /* Set up the dataOut memory allocation */ + if (karg.dataOutSize > 0) { + dir = PCI_DMA_TODEVICE; + if (karg.dataInSize > 0 ) { + flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_DIRECTION | + MPT_SGE_ADDRESS_SIZE ) + << MPI_SGE_FLAGS_SHIFT; + } else { + flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; + } + flagsLength |= karg.dataOutSize; + + this_alloc = karg.dataOutSize; + bufOut.len = this_alloc; + bufOut.kptr = pci_alloc_consistent( + ioc->pcidev, this_alloc, &dma_addr); + + if (bufOut.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Copy user data to kernel space. + */ + if (copy_from_user(bufOut.kptr, + karg.dataOutBufPtr, + bufOut.len)) { + + printk(KERN_ERR + "%s@%d::mptctl_do_mpt_command - Unable " + "to read user data " + "struct @ %p\n", + __FILE__, __LINE__,(void*)karg.dataOutBufPtr); + rc = -EFAULT; + goto done_free_mem; + } + + /* Set up this SGE. 
+ * Copy to MF and to sglbuf + */ + + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX(dma_addr, psge->Address); + psge++; + + this_sge->FlagsLength=cpu_to_le32(flagsLength); + cpu_to_leXX(dma_addr, this_sge->Address); + this_sge++; + } + } + + if (karg.dataInSize > 0) { + dir = PCI_DMA_FROMDEVICE; + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + flagsLength |= karg.dataInSize; + + this_alloc = karg.dataInSize; + bufIn.len = this_alloc; + bufIn.kptr = pci_alloc_consistent(ioc->pcidev, + this_alloc, &dma_addr); + if (bufIn.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Set up this SGE + * Copy to MF and to sglbuf + */ + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX(dma_addr, psge->Address); + + this_sge->FlagsLength=cpu_to_le32(flagsLength); + cpu_to_leXX(dma_addr, this_sge->Address); + this_sge++; + } + } + } else { + /* Add a NULL SGE + */ + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + } + + /* The request is complete. Set the timer parameters + * and issue the request. + */ + if (karg.timeout > 0) { + ioc->ioctl->timer.expires = jiffies + HZ*karg.timeout; + } else { + ioc->ioctl->timer.expires = jiffies + HZ*MPT_IOCTL_DEFAULT_TIMEOUT; + } + + ioc->ioctl->wait_done = 0; + ioc->ioctl->status |= MPT_IOCTL_STATUS_TIMER_ACTIVE; + add_timer(&ioc->ioctl->timer); + + mpt_put_msg_frame(mptctl_id, ioc->id, mf); + wait_event(mptctl_wait, ioc->ioctl->wait_done); + + /* The command is complete. * Return data to the user. + * + * If command completed, mf has been freed so cannot + * use this memory. + * + * If timeout, a recovery mechanism has been called. + * Need to free the mf. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_TIMEOUT) { + + /* A timeout - there is no data to return to the + * the user other than an error. + * The timer callback deleted the + * timer and reset the adapter queues. 
+ */ + printk(KERN_WARNING "%s@%d::mptctl_do_mpt_command - " + "Timeout Occurred on IOCTL! Resetting IOC.\n", __FILE__, __LINE__); + rc = -ETIME; + + /* Free memory and return to the calling function + */ + goto done_free_mem; + + } else { + /* Callback freed request frame. + */ + mf = NULL; + + /* If a valid reply frame, copy to the user. + * Offset 2: reply length in U32's + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { + if (karg.maxReplyBytes < ioc->reply_sz) { + sz = MIN(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); + } else { + sz = MIN(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); + } + + if (sz > 0) { + if (copy_to_user((char *)karg.replyFrameBufPtr, + &ioc->ioctl->ReplyFrame, sz)){ + + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write out reply frame %p\n", + __FILE__, __LINE__, (void*)karg.replyFrameBufPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If valid sense data, copy to user. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { + sz = MIN(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); + if (sz > 0) { + if (copy_to_user((char *)karg.senseDataPtr, ioc->ioctl->sense, sz)) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write sense data to user %p\n", + __FILE__, __LINE__, + (void*)karg.senseDataPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If the overall status is _GOOD and data in, copy data + * to user. + */ + if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && + (karg.dataInSize > 0) && (bufIn.kptr)) { + + if (copy_to_user((char *)karg.dataInBufPtr, + bufIn.kptr, karg.dataInSize)) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write data to user %p\n", + __FILE__, __LINE__, + (void*)karg.dataInBufPtr); + rc = -ENODATA; + } + } + } + +done_free_mem: + /* Clear status bits. + */ + ioc->ioctl->status = 0; + + if (sglbuf) { + this_sge = sglbuf; + + /* Free the allocated memory. 
+ */ + if (bufOut.kptr != NULL ) { + + leXX_to_cpu (dma_addr, this_sge->Address); + + this_sge++; /* go to next structure */ + this_alloc = bufOut.len; + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) &bufOut, dma_addr); + } + + if (bufIn.kptr != NULL ) { + leXX_to_cpu (dma_addr, this_sge->Address); + this_alloc = bufIn.len; + + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) &bufIn, dma_addr); + } + + this_alloc = sgSize * sizeof(MptSge_t); + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) sglbuf, sglbuf_dma); + + } + + /* mf will be null if allocation failed OR + * if command completed OK (callback freed) + */ + if (mf) + mpt_free_msg_frame(mptctl_id, ioc->id, mf); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Routine for the Compaq IOCTL commands. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_compaq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int iocnum = 0; + unsigned iocnumX = 0; + int ret; + int nonblock = (file->f_flags & O_NONBLOCK); + MPT_ADAPTER *iocp = NULL; + + if (cmd == CPQFCTS_SCSI_PASSTHRU) { + /* Update the iocnum */ + if (copy_from_user(&iocnumX, (int *)arg, sizeof(int))) { + printk(KERN_ERR "%s::mptctl_compaq_ioctl() @%d - " + "Unable to read controller number @ %p\n", + __FILE__, __LINE__, (void*)arg); + return -EFAULT; + } + iocnumX &= 0xFF; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_ERR "%s::mptctl_compaq_ioctl() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnumX); + return -ENODEV; + } + + /* All of these commands require an interrupt or + * are unknown/illegal. 
+ */ + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_compaq_ioctl()\n", iocp->name)); + + switch(cmd) { + case CPQFCTS_GETPCIINFO: + ret = mptctl_cpq_getpciinfo(arg); + break; + case CPQFCTS_GETDRIVER: + ret = mptctl_cpq_getdriver(arg); + break; + case CPQFCTS_CTLR_STATUS: + ret = mptctl_cpq_ctlr_status(arg); + break; + case CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS: + ret = mptctl_cpq_target_address(arg); + break; + case CPQFCTS_SCSI_PASSTHRU: + ret = mptctl_cpq_passthru(arg); + break; + default: + ret = -EINVAL; + } + + up(&mptctl_syscall_sem_ioc[iocp->id]); + + return ret; + +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_getpciinfo - Get PCI Information in format desired by Compaq + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_cpq_getpciinfo(unsigned long arg) +{ + cpqfc_pci_info_struct *uarg = (cpqfc_pci_info_struct *) arg; + cpqfc_pci_info_struct karg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + int iocnum = 0, iocnumX = 0; + dma_addr_t buf_dma; + u8 *pbuf = NULL; + int failed; + + dctlprintk((": mptctl_cpq_pciinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(cpqfc_pci_info_struct))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_pciinfo - " + "Unable to read in cpqfc_pci_info_struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_pciinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + pdev = (struct pci_dev *) ioc->pcidev; + + /* Populate the structure. 
*/ + karg.bus = pdev->bus->number; + karg.bus_type = 1; /* 1 = PCI; 4 = unknown */ + karg.device_fn = PCI_FUNC(pdev->devfn); + karg.slot_number = PCI_SLOT(pdev->devfn); + karg.vendor_id = pdev->vendor; + karg.device_id = pdev->device; + karg.board_id = (karg.device_id | (karg.vendor_id << 16)); + karg.class_code = pdev->class; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + karg.sub_vendor_id = pdev->subsystem_vendor; + karg.sub_device_id = pdev->subsystem_device; +#endif + + /* Issue a config request to get the device serial number + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; /* read */ + cfg.timeout = 10; + + failed = 1; + + if (mpt_config(ioc, &cfg) == 0) { + if (cfg.hdr->PageLength > 0) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) == 0) { + ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; + strncpy(karg.serial_number, pdata->BoardTracerNumber, 17); + failed = 0; + } + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + } + if (failed) + strncpy(karg.serial_number, " ", 17); + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(cpqfc_pci_info_struct))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_pciinfo - " + "Unable to write out cpqfc_pci_info_struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_getdriver - Get Driver Version in format desired by Compaq + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_cpq_getdriver(unsigned long arg) +{ + int *uarg = (int *)arg; + int karg; + MPT_ADAPTER *ioc = NULL; + int iocnum = 0, iocnumX = 0; + int ii, jj; + char version[10]; + char val; + char *vptr = NULL; + char *pptr = NULL; + + dctlprintk((": mptctl_cpq_getdriver called.\n")); + if (copy_from_user(&karg, uarg, sizeof(int))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_getdriver - " + "Unable to read in struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_getdriver() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + strncpy(version, MPT_LINUX_VERSION_COMMON, 8); + + karg = 0; + vptr = version; + ii = 3; + while (ii > 0) { + pptr = strchr(vptr, '.'); + if (pptr) { + *pptr = '\0'; + val = 0; + for (jj=0; vptr[jj]>='0' && vptr[jj]<='9'; jj++) + val = 10 * val + (vptr[jj] - '0'); + karg |= (val << (8*ii)); + pptr++; + vptr = pptr; + } else + break; + ii--; + } + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(int))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_getdriver - " + "Unable to write out stuct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_ctlr_status - Get controller status in format desired by Compaq + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ static int -mpt_ioctl_rwperf_reset(unsigned long arg) +mptctl_cpq_ctlr_status(unsigned long arg) { - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! 
*/ - MPT_ADAPTER *iocp; - int ioc; -// u8 targ; -// u8 lun; - int T, L; - int i; - char *myname = "_rwperf_reset()"; - - dprintk((KERN_INFO "%s - starting...\n", myname)); - - /* Get MPT adapter id. */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; - - /* set perf parameters from input */ -// targ = kPerfInfo.target & 0xFF; -// lun = kPerfInfo.lun & 0x1F; - - rwperf_reset = 1; - for (i=0; i < 1000000; i++) { - mb(); - schedule(); - barrier(); - } - rwperf_reset = 0; - - for (T=0; T < myMAX_TARGETS; T++) - for (L=0; L < myMAX_LUNS; L++) - if (DevIosCount[T][L]) { - printk(KERN_INFO "%s: ioc%d->00:%02x:%02x, " - "IosCnt RESET! (from %d to 0)\n", - myname, ioc, T, L, DevIosCount[T][L] ); - DevIosCount[T][L] = 0; - DevInUse[T][L] = 0; + cpqfc_ctlr_status *uarg = (cpqfc_ctlr_status *) arg; + cpqfc_ctlr_status karg; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + + dctlprintk((": mptctl_cpq_pciinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(cpqfc_ctlr_status))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_ctlr_status - " + "Unable to read in cpqfc_ctlr_status @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_ctlr_status() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + karg.status = ioc->last_state; + karg.offline_reason = 0; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(cpqfc_ctlr_status))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_ctlr_status - " + "Unable to write out cpqfc_ctlr_status @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_target_address - Get WWN Information in format desired by Compaq + * + * Outputs: None. 
+ * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_cpq_target_address(unsigned long arg) +{ + Scsi_FCTargAddress *uarg = (Scsi_FCTargAddress *) arg; + Scsi_FCTargAddress karg; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + dma_addr_t buf_dma; + u8 *pbuf = NULL; + FCPortPage0_t *ppp0; + int ii, failed; + u32 low, high; + + dctlprintk((": mptctl_cpq_target_address called.\n")); + if (copy_from_user(&karg, uarg, sizeof(Scsi_FCTargAddress))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_target_address - " + "Unable to read in Scsi_FCTargAddress @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_target_address() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + karg.host_port_id = 0; + + /* Issue a config request to get the device wwn + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; /* read */ + cfg.timeout = 10; + + failed = 1; + + if (mpt_config(ioc, &cfg) == 0) { + if (cfg.hdr->PageLength > 0) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) == 0) { + ppp0 = (FCPortPage0_t *) pbuf; + + low = le32_to_cpu(ppp0->WWNN.Low); + high = le32_to_cpu(ppp0->WWNN.High); + + for (ii = 0; ii < 4; ii++) { + karg.host_wwn[7-ii] = low & 0xFF; + karg.host_wwn[3-ii] = high & 0xFF; + low = (low >> 8); + high = (high >> 8); + } + failed = 0; 
+ } + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; } + } + } + + if (failed) { + for (ii = 7; ii >= 0; ii--) + karg.host_wwn[ii] = 0; + } + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(Scsi_FCTargAddress))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_target_address - " + "Unable to write out Scsi_FCTargAddress @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_passthru - Construct and issue a SCSI IO Passthru + * + * Requires the SCSI host driver to be loaded. + * I386 version. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ static int -mpt_ioctl_scsi_cmd(unsigned long arg) +mptctl_cpq_passthru(unsigned long arg) { - return -ENOSYS; + VENDOR_IOCTL_REQ *uarg = (VENDOR_IOCTL_REQ *) arg; + VENDOR_IOCTL_REQ karg; + cpqfc_passthru_t kpass; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + int rc; + + dctlprintk((": mptctl_cpq_passthru called.\n")); + if (copy_from_user(&karg, uarg, sizeof(VENDOR_IOCTL_REQ))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_passthru - " + "Unable to read in VENDOR_IOCTL_REQ @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + /* Set the IOC number */ + iocnumX = karg.lc & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_passthru() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + if (ioc->sh == NULL) { + printk(KERN_ERR "%s::mptctl_cpq_passthru() @%d - SCSI Host driver not loaded!\n", + __FILE__, __LINE__); + return -EFAULT; + } + + /* Read in the second buffer */ + if (copy_from_user(&kpass, uarg->argp, sizeof(cpqfc_passthru_t))) { + 
printk(KERN_ERR "%s@%d::mptctl_cpq_passthru - " + "Unable to read in cpqfc_passthru_t @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + + /* Generate the SCSI IO command and issue */ + rc = mptctl_compaq_scsiio(&karg, &kpass); + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_compaq_scsiio - Reformat Compaq structures into driver structures + * Call the generic _do_mpt_command function. + * + * Requires the SCSI host driver to be loaded. + * I386 version. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_compaq_scsiio(VENDOR_IOCTL_REQ *pVenReq, cpqfc_passthru_t *pPass) +{ + struct mpt_ioctl_command karg; + SCSIIORequest_t request ; + SCSIIORequest_t *pMf; + int ii, rc; + u8 opcode; + + /* Fill in parameters to karg */ + karg.hdr.iocnum = pVenReq->lc; + karg.hdr.port = 0; + karg.hdr.maxDataSize = 0; /* not used */ + karg.timeout = 0; /* use default */ + + karg.replyFrameBufPtr = NULL; /* no reply data */ + karg.maxReplyBytes = 0; + + karg.senseDataPtr = pPass->sense_data; + karg.maxSenseBytes = pPass->sense_len; /* max is 40 */ + + if (pPass->rw_flag == MPT_COMPAQ_WRITE) { + karg.dataOutBufPtr = pPass->bufp; + karg.dataOutSize = pPass->len; + karg.dataInBufPtr = NULL; + karg.dataInSize = 0; + } else { + karg.dataInBufPtr = pPass->bufp; + karg.dataInSize = pPass->len; + karg.dataOutBufPtr = NULL; + karg.dataOutSize = 0; + } + + karg.dataSgeOffset = (sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION))/4; + + /* Construct the Message frame */ + pMf = &request; + + pMf->TargetID = (u8) pVenReq->ld; /* ???? 
FIXME */ + pMf->Bus = (u8) pPass->bus; + pMf->ChainOffset = 0; + pMf->Function = MPI_FUNCTION_SCSI_IO_REQUEST; + + /* May need some tweaking here */ + opcode = (u8) pPass->cdb[0]; + if (opcode < 0x20) + pMf->CDBLength = 6; + else if (opcode < 0x60) + pMf->CDBLength = 10; + else if ((opcode < 0xC0) && (opcode >= 0xA0)) + pMf->CDBLength = 12; + else + pMf->CDBLength = 16; + + pMf->SenseBufferLength = karg.maxSenseBytes; /* max is 40 */ + pMf->Reserved = 0; + pMf->MsgFlags = 0; /* set later */ + pMf->MsgContext = 0; /* set later */ + + for (ii = 0; ii < 8; ii++) + pMf->LUN[ii] = 0; + pMf->LUN[1] = 0; /* ???? FIXME */ + + /* Tag values set by _do_mpt_command */ + if (pPass->rw_flag == MPT_COMPAQ_WRITE) + pMf->Control = MPI_SCSIIO_CONTROL_WRITE; + else + pMf->Control = MPI_SCSIIO_CONTROL_READ; + + for (ii = 0; ii < 16; ii++) + pMf->CDB[ii] = pPass->cdb[ii]; + + pMf->DataLength = pPass->len; + + /* All remaining fields are set by the next function + */ + rc = mptctl_do_mpt_command (karg, (char *)pMf, 1); + return rc; } + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,51) @@ -1111,7 +2750,7 @@ llseek: no_llseek, read: mptctl_read, write: mptctl_write, - ioctl: mpt_ioctl, + ioctl: mptctl_ioctl, open: mptctl_open, release: mptctl_release, }; @@ -1134,18 +2773,15 @@ unsigned long, struct file *)); int unregister_ioctl32_conversion(unsigned int cmd); - -struct mpt_fw_xfer32 { - unsigned int iocnum; - unsigned int fwlen; - u32 bufp; -}; - -#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32) - extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* sparc32_XXX functions are used to provide a conversion between + * pointers and u32's. If the arg does not contain any pointers, then + * a specialized function (sparc32_XXX) is not needed. 
If the arg + * does contain pointer(s), then the specialized function is used + * to ensure the structure contents is properly processed by mptctl. + */ static int sparc32_mptfwxfer_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *filp) @@ -1157,7 +2793,7 @@ int nonblock = (filp->f_flags & O_NONBLOCK); int ret; - dprintk((KERN_INFO MYNAM "::sparc32_mptfwxfer_ioctl() called\n")); + dctlprintk((KERN_INFO MYNAM "::sparc32_mptfwxfer_ioctl() called\n")); if (copy_from_user(&kfw32, (char *)arg, sizeof(kfw32))) return -EFAULT; @@ -1178,13 +2814,131 @@ kfw.fwlen = kfw32.fwlen; kfw.bufp = (void *)(unsigned long)kfw32.bufp; - ret = mpt_ioctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + + up(&mptctl_syscall_sem_ioc[iocp->id]); + + return ret; +} + +static int +sparc32_mpt_command(unsigned int fd, unsigned int cmd, + unsigned long arg, struct file *filp) +{ + struct mpt_ioctl_command32 karg32; + struct mpt_ioctl_command32 *uarg = (struct mpt_ioctl_command32 *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *iocp = NULL; + int iocnum, iocnumX; + int nonblock = (filp->f_flags & O_NONBLOCK); + int ret; + + dctlprintk((KERN_INFO MYNAM "::sparc32_mpt_command() called\n")); + + if (copy_from_user(&karg32, (char *)arg, sizeof(karg32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = karg32.hdr.iocnum & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_ERR MYNAM "::sparc32_mpt_command @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + /* Copy data to karg */ + karg.hdr.iocnum = karg32.hdr.iocnum; + karg.hdr.port = karg32.hdr.port; + karg.timeout = karg32.timeout; + karg.maxReplyBytes = karg32.maxReplyBytes; + + karg.dataInSize = karg32.dataInSize; + karg.dataOutSize = karg32.dataOutSize; + karg.maxSenseBytes = 
karg32.maxSenseBytes; + karg.dataSgeOffset = karg32.dataSgeOffset; + + karg.replyFrameBufPtr = (char *)(unsigned long)karg32.replyFrameBufPtr; + karg.dataInBufPtr = (char *)(unsigned long)karg32.dataInBufPtr; + karg.dataOutBufPtr = (char *)(unsigned long)karg32.dataOutBufPtr; + karg.senseDataPtr = (char *)(unsigned long)karg32.senseDataPtr; + + /* Pass new structure to do_mpt_command + */ + ret = mptctl_do_mpt_command (karg, (char *) &uarg->MF, 0); up(&mptctl_syscall_sem_ioc[iocp->id]); return ret; } +static int +sparc32_mptctl_cpq_passthru(unsigned int fd, unsigned int cmd, + unsigned long arg, struct file *filp) +{ + VENDOR_IOCTL_REQ32 *uarg = (VENDOR_IOCTL_REQ32 *) arg; + VENDOR_IOCTL_REQ32 karg32; + VENDOR_IOCTL_REQ karg; + cpqfc_passthru32_t kpass32; + cpqfc_passthru_t kpass; + MPT_ADAPTER *ioc; + int nonblock = (filp->f_flags & O_NONBLOCK); + int iocnum = 0, iocnumX = 0; + int rc; + int ii; + + dctlprintk((KERN_INFO MYNAM "::sparc32_mptctl_cpq_passthru() called\n")); + + if (copy_from_user(&karg32, (char *)arg, sizeof(karg32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = karg32.lc & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR MYNAM "::sparc32_mpt_command @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((rc = mptctl_syscall_down(ioc, nonblock)) != 0) + return rc; + + /* Copy data to karg */ + karg.ld = karg32.ld; + karg.node = karg32.node; + karg.lc = karg32.lc; + karg.nexus = karg32.nexus; + karg.argp = (void *)(unsigned long)karg32.argp; + + /* Read in the second buffer */ + if (copy_from_user(&kpass32, karg.argp, sizeof(cpqfc_passthru32_t))) { + printk(KERN_ERR "%s@%d::sparc32_mptctl_cpq_passthru - " + "Unable to read in cpqfc_passthru_t @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + /* Copy the 32bit buffer to kpass */ + for (ii = 0; ii < 16; ii++) + kpass.cdb[ii] = kpass32.cdb[ii]; + kpass.bus = kpass32.bus; + 
kpass.pdrive = kpass32.pdrive; + kpass.len = kpass32.len; + kpass.sense_len = kpass32.sense_len; + kpass.bufp = (void *)(unsigned long)kpass32.bufp; + kpass.rw_flag = kpass32.rw_flag; + + /* Generate the SCSI IO command and issue */ + rc = mptctl_compaq_scsiio(&karg, &kpass); + + up(&mptctl_syscall_sem_ioc[ioc->id]); + return rc; +} + #endif /*} linux >= 2.3.x */ #endif /*} sparc */ @@ -1194,26 +2948,76 @@ int err; int i; int where = 1; + int sz; + u8 *mem; + MPT_ADAPTER *ioc = NULL; + int iocnum; show_mptmod_ver(my_NAME, my_VERSION); for (i=0; iioctl = (MPT_IOCTL *) mem; + ioc->ioctl->ioc = ioc; + init_timer (&ioc->ioctl->timer); + ioc->ioctl->timer.data = (unsigned long) ioc->ioctl; + ioc->ioctl->timer.function = mptctl_timer_expired; + } } #if defined(__sparc__) && defined(__sparc_v9__) /*{*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ - err = register_ioctl32_conversion(MPTRWPERF, NULL); + err = register_ioctl32_conversion(MPTIOCINFO, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTTARGETINFO, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTTEST, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTQUERY, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTENABLE, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTREPORT, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTHARDRESET, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTCOMMAND32, sparc32_mpt_command); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTFWDOWNLOAD32, + sparc32_mptfwxfer_ioctl); if (++where && err) goto out_fail; - err = register_ioctl32_conversion(MPTRWPERF_CHK, NULL); + err = register_ioctl32_conversion(CPQFCTS_GETPCIINFO, NULL); if (++where && err) goto out_fail; - err = 
register_ioctl32_conversion(MPTRWPERF_RESET, NULL); + err = register_ioctl32_conversion(CPQFCTS_CTLR_STATUS, NULL); if (++where && err) goto out_fail; - err = register_ioctl32_conversion(MPTFWDOWNLOAD32, sparc32_mptfwxfer_ioctl); + err = register_ioctl32_conversion(CPQFCTS_GETDRIVER, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(CPQFCTS_SCSI_PASSTHRU32, sparc32_mptctl_cpq_passthru); if (++where && err) goto out_fail; #endif /*} linux >= 2.3.x */ #endif /*} sparc */ + /* Register this device */ if (misc_register(&mptctl_miscdev) == -1) { printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR); err = -EBUSY; @@ -1227,13 +3031,19 @@ * Install our handler */ ++where; - if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) <= 0) { + if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) < 0) { printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); misc_deregister(&mptctl_miscdev); err = -EBUSY; goto out_fail; } + if (mpt_reset_register(mptctl_id, mptctl_ioc_reset) == 0) { + dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); + } else { + /* FIXME! */ + } + return 0; out_fail: @@ -1242,35 +3052,72 @@ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ printk(KERN_ERR MYNAM ": ERROR: Failed to register ioctl32_conversion!" 
" (%d:err=%d)\n", where, err); - unregister_ioctl32_conversion(MPTRWPERF); - unregister_ioctl32_conversion(MPTRWPERF_CHK); - unregister_ioctl32_conversion(MPTRWPERF_RESET); + unregister_ioctl32_conversion(MPTIOCINFO); + unregister_ioctl32_conversion(MPTTARGETINFO); + unregister_ioctl32_conversion(MPTTEST); + unregister_ioctl32_conversion(MPTEVENTQUERY); + unregister_ioctl32_conversion(MPTEVENTENABLE); + unregister_ioctl32_conversion(MPTEVENTREPORT); + unregister_ioctl32_conversion(MPTHARDRESET); + unregister_ioctl32_conversion(MPTCOMMAND32); unregister_ioctl32_conversion(MPTFWDOWNLOAD32); + unregister_ioctl32_conversion(CPQFCTS_GETPCIINFO); + unregister_ioctl32_conversion(CPQFCTS_GETDRIVER); + unregister_ioctl32_conversion(CPQFCTS_CTLR_STATUS); + unregister_ioctl32_conversion(CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS); + unregister_ioctl32_conversion(CPQFCTS_SCSI_PASSTHRU32); #endif /*} linux >= 2.3.x */ #endif /*} sparc */ + for (i=0; iioctl) { + kfree ( ioc->ioctl ); + ioc->ioctl = NULL; + } + } + } return err; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ void mptctl_exit(void) { - -#if defined(__sparc__) && defined(__sparc_v9__) /*{*/ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ - unregister_ioctl32_conversion(MPTRWPERF); - unregister_ioctl32_conversion(MPTRWPERF_CHK); - unregister_ioctl32_conversion(MPTRWPERF_RESET); - unregister_ioctl32_conversion(MPTFWDOWNLOAD32); -#endif /*} linux >= 2.3.x */ -#endif /*} sparc */ + int i; + MPT_ADAPTER *ioc; + int iocnum; misc_deregister(&mptctl_miscdev); - printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n", + printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n", mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); - printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n"); + /* De-register reset handler from base module */ + mpt_reset_deregister(mptctl_id); + dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n")); + + /* 
De-register callback handler from base module */ mpt_deregister(mptctl_id); + printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n"); + + /* Free allocated memory */ + for (i=0; iioctl) { + kfree ( ioc->ioctl ); + ioc->ioctl = NULL; + } + } + } } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/message/fusion/mptctl.h Fri Apr 26 00:01:27 2002 @@ -0,0 +1,395 @@ +/* + * linux/drivers/message/fusion/mptioctl.h + * Fusion MPT misc device (ioctl) driver. + * For use with PCI chip/adapter(s): + * LSIFC9xx/LSI409xx Fibre Channel + * running LSI Logic Fusion MPT (Message Passing Technology) firmware. + * + * Credits: + * This driver would not exist if not for Alan Cox's development + * of the linux i2o driver. + * + * A huge debt of gratitude is owed to David S. Miller (DaveM) + * for fixing much of the stupid and broken stuff in the early + * driver while porting to sparc64 platform. THANK YOU! + * + * (see also mptbase.c) + * + * Copyright (c) 1999-2002 LSI Logic Corporation + * Originally By: Steven J. Ralston + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) + * + * $Id: mptctl.h,v 1.2 2002/03/19 23:05:36 awilliam Exp $ + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef MPTCTL_H_INCLUDED +#define MPTCTL_H_INCLUDED +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "linux/version.h" + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * + */ +#define MPT_MISCDEV_BASENAME "mptctl" +#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME + +#define MPT_PRODUCT_LENGTH 12 + +/* + * Generic MPT Control IOCTLs and structures + */ +#define MPT_MAGIC_NUMBER 'm' + +#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w) + +#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct 
mpt_fw_xfer) +#define MPTCOMMAND _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command) + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32) +#define MPTCOMMAND32 _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32) +#endif /*}*/ + +#define MPTIOCINFO _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo) +#define MPTTARGETINFO _IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo) +#define MPTTEST _IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test) +#define MPTEVENTQUERY _IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery) +#define MPTEVENTENABLE _IOWR(MPT_MAGIC_NUMBER,22,struct mpt_ioctl_eventenable) +#define MPTEVENTREPORT _IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport) +#define MPTHARDRESET _IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset) +#define MPTFWREPLACE _IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw) + +/* + * SPARC PLATFORM REMARK: + * IOCTL data structures that contain pointers + * will have different sizes in the driver and applications + * (as the app. will not use 8-byte pointers). + * Apps should use MPTFWDOWNLOAD and MPTCOMMAND. + * The driver will convert data from + * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command) + * internally. + */ +struct mpt_fw_xfer { + unsigned int iocnum; /* IOC unit number */ + unsigned int fwlen; + void *bufp; /* Pointer to firmware buffer */ +}; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +struct mpt_fw_xfer32 { + unsigned int iocnum; + unsigned int fwlen; + u32 bufp; +}; +#endif /*}*/ + + +/* + * IOCTL header structure. + * iocnum - must be defined. + * port - must be defined for all IOCTL commands other than MPTIOCINFO + * maxDataSize - ignored on MPTCOMMAND commands + * - ignored on MPTFWREPLACE commands + * - on query commands, reports the maximum number of bytes to be returned + * to the host driver (count includes the header). 
+ * That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands. + * Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable + * sized commands. (MPTTARGETINFO, MPTEVENTREPORT) + */ +typedef struct _mpt_ioctl_header { + unsigned int iocnum; /* IOC unit number */ + unsigned int port; /* IOC port number */ + int maxDataSize; /* Maximum Num. bytes to transfer on read */ +} mpt_ioctl_header; + +/* + * Issue a diagnostic reset + */ +struct mpt_ioctl_diag_reset { + mpt_ioctl_header hdr; +}; + + +/* + * Adapter Information Page + * Read only. + * Data starts at offset 0xC + */ +#define MPT_IOCTL_INTERFACE_FC (0x01) +#define MPT_IOCTL_INTERFACE_SCSI (0x00) +#define MPT_IOCTL_VERSION_LENGTH (32) + +struct mpt_ioctl_iocinfo { + mpt_ioctl_header hdr; + int adapterType; /* SCSI or FCP */ + int port; /* port number */ + int pciId; /* PCI Id. */ + int hwRev; /* hardware revision */ + int subSystemDevice; /* PCI subsystem Device ID */ + int subSystemVendor; /* PCI subsystem Vendor ID */ + int numDevices; /* number of devices */ + int FWVersion; /* FW Version (integer) */ + int BIOSVersion; /* BIOS Version (integer) */ + char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */ + char busChangeEvent; + char hostId; + char rsvd[2]; +}; + +/* + * Device Information Page + * Report the number of, and ids of, all targets + * on this IOC. The ids array is a packed structure + * of the known targetInfo. + * bits 31-24: reserved + * 23-16: LUN + * 15- 8: Bus Number + * 7- 0: Target ID + */ +struct mpt_ioctl_targetinfo { + mpt_ioctl_header hdr; + int numDevices; /* Num targets on this ioc */ + int targetInfo[1]; +}; + + +/* + * Event reporting IOCTL's. 
These IOCTL's will + * use the following defines: + */ +struct mpt_ioctl_eventquery { + mpt_ioctl_header hdr; + unsigned short eventEntries; + unsigned short reserved; + unsigned int eventTypes; +}; + +struct mpt_ioctl_eventenable { + mpt_ioctl_header hdr; + unsigned int eventTypes; +}; + +#ifndef __KERNEL__ +typedef struct { + uint event; + uint eventContext; + uint data[2]; +} MPT_IOCTL_EVENTS; +#endif + +struct mpt_ioctl_eventreport { + mpt_ioctl_header hdr; + MPT_IOCTL_EVENTS eventData[1]; +}; + +#define MPT_MAX_NAME 32 +struct mpt_ioctl_test { + mpt_ioctl_header hdr; + u8 name[MPT_MAX_NAME]; + int chip_type; + u8 product [MPT_PRODUCT_LENGTH]; +}; + +/* Replace the FW image cached in host driver memory + * newImageSize - image size in bytes + * newImage - first byte of the new image + */ +typedef struct mpt_ioctl_replace_fw { + mpt_ioctl_header hdr; + int newImageSize; + u8 newImage[1]; +} mpt_ioctl_replace_fw_t; + +/* General MPT Pass through data strucutre + * + * iocnum + * timeout - in seconds, command timeout. If 0, set by driver to + * default value. + * replyFrameBufPtr - reply location + * dataInBufPtr - destination for read + * dataOutBufPtr - data source for write + * senseDataPtr - sense data location + * maxReplyBytes - maximum number of reply bytes to be sent to app. + * dataInSize - num bytes for data transfer in (read) + * dataOutSize - num bytes for data transfer out (write) + * dataSgeOffset - offset in words from the start of the request message + * to the first SGL + * MF[1]; + * + * Remark: Some config pages have bi-directional transfer, + * both a read and a write. The basic structure allows for + * a bidirectional set up. Normal messages will have one or + * both of these buffers NULL. 
+ */ +struct mpt_ioctl_command { + mpt_ioctl_header hdr; + int timeout; /* optional (seconds) */ + char *replyFrameBufPtr; + char *dataInBufPtr; + char *dataOutBufPtr; + char *senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; + +/* + * SPARC PLATFORM: See earlier remark. + */ +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +struct mpt_ioctl_command32 { + mpt_ioctl_header hdr; + int timeout; + u32 replyFrameBufPtr; + u32 dataInBufPtr; + u32 dataOutBufPtr; + u32 senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; +#endif /*}*/ + + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + /* + * COMPAQ Specific IOCTL Defines and Structures + */ + +#define CPQFCTS_IOC_MAGIC 'Z' + +#define CPQFCTS_GETPCIINFO _IOR(CPQFCTS_IOC_MAGIC, 1, cpqfc_pci_info_struct) +#define CPQFCTS_GETDRIVER _IOR(CPQFCTS_IOC_MAGIC, 2, int) +#define CPQFCTS_CTLR_STATUS _IOR(CPQFCTS_IOC_MAGIC, 3, struct _cpqfc_ctlr_status) +#define CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS _IOR(CPQFCTS_IOC_MAGIC, 4, struct scsi_fctargaddress) +#define CPQFCTS_SCSI_PASSTHRU _IOWR(CPQFCTS_IOC_MAGIC, 5, VENDOR_IOCTL_REQ) +#if defined(__sparc__) && defined(__sparc_v9__) +#define CPQFCTS_SCSI_PASSTHRU32 _IOWR(CPQFCTS_IOC_MAGIC, 5, VENDOR_IOCTL_REQ32) +#endif + +typedef struct { + unsigned short bus; + unsigned short bus_type; + unsigned short device_fn; + u32 board_id; + u32 slot_number; + unsigned short vendor_id; + unsigned short device_id; + unsigned short class_code; + unsigned short sub_vendor_id; + unsigned short sub_device_id; + u8 serial_number[81]; +} cpqfc_pci_info_struct; + + +typedef struct scsi_fctargaddress { + unsigned int host_port_id; + u8 host_wwn[8]; /* WW Network Name */ +} Scsi_FCTargAddress; + +typedef struct _cpqfc_ctlr_status { + u32 status; + u32 offline_reason; +} cpqfc_ctlr_status; 
+ + +/* Compaq SCSI I/O Passthru structures. + */ +#define MPT_COMPAQ_READ 0x26 +#define MPT_COMPAQ_WRITE 0x27 + +typedef struct { + int lc; /* controller number */ + int node; /* node number */ + int ld; /* target logical id */ + u32 nexus; + void *argp; +} VENDOR_IOCTL_REQ; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +typedef struct { + int lc; /* controller number */ + int node; /* node number */ + int ld; /* target logical id */ + u32 nexus; + u32 argp; +} VENDOR_IOCTL_REQ32; +#endif + +typedef struct { + char cdb[16]; /* cdb */ + unsigned short bus; /* bus number */ + unsigned short pdrive; /* physical drive */ + int len; /* data area size */ + int sense_len; /* sense size */ + char sense_data[40]; /* sense buffer */ + void *bufp; /* data buffer pointer */ + char rw_flag; +} cpqfc_passthru_t; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +typedef struct { + char cdb[16]; /* cdb */ + unsigned short bus; /* bus number */ + unsigned short pdrive; /* physical drive */ + int len; /* data area size */ + int sense_len; /* sense size */ + char sense_data[40]; /* sense buffer */ + u32 bufp; /* data buffer pointer */ + char rw_flag; +} cpqfc_passthru32_t; +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#endif + diff -Nru a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c --- a/drivers/message/fusion/mptlan.c Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/mptlan.c Fri Apr 26 00:01:27 2002 @@ -23,10 +23,10 @@ * * (see also mptbase.c) * - * Copyright (c) 2000-2001 LSI Logic Corporation + * Copyright (c) 2000-2002 LSI Logic Corporation * Originally By: Noah Romer * - * $Id: mptlan.c,v 1.32.2.2 2001/07/12 19:43:33 nromer Exp $ + * $Id: mptlan.c,v 1.51 2002/02/11 14:40:55 sralston Exp $ */ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -79,6 +79,8 @@ #define MYNAM "mptlan" +MODULE_LICENSE("GPL"); + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * MPT LAN message sizes without variable part. @@ -109,8 +111,8 @@ MPT_ADAPTER *mpt_dev; u8 pnum; /* Port number in the IOC. This is not a Unix network port! */ - atomic_t buckets_out; /* number of unused buckets on IOC */ - int bucketthresh; /* Send more when this many used */ + atomic_t buckets_out; /* number of unused buckets on IOC */ + int bucketthresh; /* Send more when this many left */ int *mpt_txfidx; /* Free Tx Context list */ int mpt_txfidx_tail; @@ -123,8 +125,8 @@ struct BufferControl *RcvCtl; /* Receive BufferControl structs */ struct BufferControl *SendCtl; /* Send BufferControl structs */ - int max_buckets_out; /* Max buckets to send to IOC */ - int tx_max_out; /* IOC's Tx queue len */ + int max_buckets_out; /* Max buckets to send to IOC */ + int tx_max_out; /* IOC's Tx queue len */ u32 total_posted; u32 total_received; @@ -152,7 +154,8 @@ static int mpt_lan_reset(struct net_device *dev); static int mpt_lan_close(struct net_device *dev); static void mpt_lan_post_receive_buckets(void *dev_id); -static void mpt_lan_wake_post_buckets_task(struct net_device *dev); +static void mpt_lan_wake_post_buckets_task(struct net_device *dev, + int priority); static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); static int mpt_lan_receive_post_reply(struct net_device *dev, LANReceivePostReply_t *pRecvRep); @@ -175,8 +178,10 @@ static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1]; +#ifdef QLOGIC_NAA_WORKAROUND static struct NAA_Hosed *mpt_bad_naa = NULL; rwlock_t bad_naa_lock; +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -229,7 +234,7 @@ case LAN_REPLY_FORM_SEND_SINGLE: // dioprintk((MYNAM "/lan_reply: " // "calling mpt_lan_send_reply (turbo)\n")); - + // Potential 
BUG here? -sralston // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg); // If/when mpt_lan_send_turbo would return 1 here, @@ -333,7 +338,7 @@ struct net_device *dev = mpt_landev[ioc->id]; struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv; - dprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", + dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); if (priv->mpt_rxfidx == NULL) @@ -342,9 +347,11 @@ if (reset_phase == MPT_IOC_PRE_RESET) { int i; unsigned long flags; - + netif_stop_queue(dev); + dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name)); + atomic_set(&priv->buckets_out, 0); /* Reset Rx Free Tail index and re-populate the queue. */ @@ -365,7 +372,7 @@ static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { - dprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n")); + dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n")); switch (le32_to_cpu(pEvReply->Event)) { case MPI_EVENT_NONE: /* 00 */ @@ -403,9 +410,9 @@ if (mpt_lan_reset(dev) != 0) { MPT_ADAPTER *mpt_dev = priv->mpt_dev; - + printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed."); - + if (mpt_dev->active) printk ("The ioc is active. 
Perhaps it needs to be" " reset?\n"); @@ -429,7 +436,7 @@ priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i; } - dprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n")); + dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n")); priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int), GFP_KERNEL); @@ -447,12 +454,12 @@ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; } -/**/ dprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); +/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); /**/ for (i = 0; i < priv->tx_max_out; i++) -/**/ dprintk((" %xh", priv->mpt_txfidx[i])); -/**/ dprintk(("\n")); +/**/ dlprintk((" %xh", priv->mpt_txfidx[i])); +/**/ dlprintk(("\n")); - dprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); + dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); mpt_lan_post_receive_buckets(dev); printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", @@ -466,7 +473,7 @@ } netif_start_queue(dev); - dprintk((KERN_INFO MYNAM "/lo: Done.\n")); + dlprintk((KERN_INFO MYNAM "/lo: Done.\n")); return 0; out_mpt_rxfidx: @@ -494,7 +501,7 @@ mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id); if (mf == NULL) { -/* dprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! " +/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! 
" "Unable to allocate a request frame.\n")); */ return -1; @@ -523,11 +530,11 @@ unsigned int timeout; int i; - dprintk((KERN_INFO MYNAM ": mpt_lan_close called\n")); + dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n")); mpt_event_deregister(LanCtx); - dprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " + dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " "since driver was loaded, %d still out\n", priv->total_posted,atomic_read(&priv->buckets_out))); @@ -537,18 +544,18 @@ timeout = 2 * HZ; while (atomic_read(&priv->buckets_out) && --timeout) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); } for (i = 0; i < priv->max_buckets_out; i++) { if (priv->RcvCtl[i].skb != NULL) { -/**/ dprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " +/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " /**/ "is still out\n", i)); pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma, - priv->RcvCtl[i].len, + priv->RcvCtl[i].len, PCI_DMA_FROMDEVICE); - dev_kfree_skb(priv->RcvCtl[i].skb); + dev_kfree_skb(priv->RcvCtl[i].skb); } } @@ -556,11 +563,11 @@ kfree (priv->mpt_rxfidx); for (i = 0; i < priv->tx_max_out; i++) { - if (priv->SendCtl[i].skb != NULL) { + if (priv->SendCtl[i].skb != NULL) { pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma, priv->SendCtl[i].len, PCI_DMA_TODEVICE); - dev_kfree_skb(priv->SendCtl[i].skb); + dev_kfree_skb(priv->SendCtl[i].skb); } } @@ -599,7 +606,13 @@ static void mpt_lan_tx_timeout(struct net_device *dev) { - netif_wake_queue(dev); + struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv; + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + + if (mpt_dev->active) { + dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name)); + netif_wake_queue(dev); + } } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -722,7 +735,6 @@ dma_addr_t dma; unsigned long flags; int ctx; - struct NAA_Hosed *nh; u16 cur_naa = 0x1000; 
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n", @@ -741,7 +753,6 @@ mf = mpt_get_msg_frame(LanCtx, mpt_dev->id); if (mf == NULL) { netif_stop_queue(dev); - dev_kfree_skb(skb); spin_unlock_irqrestore(&priv->txfidx_lock, flags); printk (KERN_ERR "%s: Unable to alloc request frame\n", @@ -791,6 +802,10 @@ // IOC_AND_NETDEV_NAMES_s_s(dev), // ctx, skb, skb->data)); +#ifdef QLOGIC_NAA_WORKAROUND +{ + struct NAA_Hosed *nh; + /* Munge the NAA for Tx packets to QLogic boards, which don't follow RFC 2625. The longer I look at this, the more my opinion of Qlogic drops. */ @@ -803,12 +818,14 @@ (nh->ieee[4] == skb->mac.raw[4]) && (nh->ieee[5] == skb->mac.raw[5])) { cur_naa = nh->NAA; - dprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " + dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " "= %04x.\n", cur_naa)); break; } } read_unlock_irq(&bad_naa_lock); +} +#endif pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | (skb->mac.raw[0] << 8) | @@ -821,10 +838,10 @@ pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; /* If we ever decide to send more than one Simple SGE per LANSend, then - we will need to make sure that LAST_ELEMENT only gets set on the + we will need to make sure that LAST_ELEMENT only gets set on the last one. Otherwise, bad voodoo and evil funkiness will commence. */ pSimple->FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | + ((MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_SYSTEM_ADDRESS | @@ -842,23 +859,32 @@ dev->trans_start = jiffies; dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. 
FlagsLength = %08x.\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - le32_to_cpu(pSimple->FlagsLength))); + IOC_AND_NETDEV_NAMES_s_s(dev), + le32_to_cpu(pSimple->FlagsLength))); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static inline void -mpt_lan_wake_post_buckets_task(struct net_device *dev) +mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) +/* + * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue + */ { struct mpt_lan_priv *priv = dev->priv; - + if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { - queue_task(&priv->post_buckets_task, &tq_immediate); - mark_bh(IMMEDIATE_BH); - dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", - IOC_AND_NETDEV_NAMES_s_s(dev) )); + if (priority) { + queue_task(&priv->post_buckets_task, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } else { + queue_task(&priv->post_buckets_task, &tq_timer); + dioprintk((KERN_INFO MYNAM ": post_buckets queued on " + "timer.\n")); + } + dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", + IOC_AND_NETDEV_NAMES_s_s(dev) )); } } @@ -870,7 +896,7 @@ skb->protocol = mpt_lan_type_trans(skb, dev); - dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) " + dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) " "delivered to upper level.\n", IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); @@ -884,7 +910,7 @@ atomic_read(&priv->buckets_out))); if (atomic_read(&priv->buckets_out) < priv->bucketthresh) - mpt_lan_wake_post_buckets_task(dev); + mpt_lan_wake_post_buckets_task(dev, 1); dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets " "remaining, %d received back since sod\n", @@ -956,12 +982,12 @@ unsigned long flags; struct sk_buff *skb; u32 ctx; - u8 count; + int count; int i; count = pRecvRep->NumberOfContexts; -/**/ dprintk((KERN_INFO MYNAM "/receive_post_reply: " +/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: " "IOC returned %d buckets, freeing 
them...\n", count)); spin_lock_irqsave(&priv->rxfidx_lock, flags); @@ -970,11 +996,11 @@ skb = priv->RcvCtl[ctx].skb; -// dprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n", +// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n", // IOC_AND_NETDEV_NAMES_s_s(dev))); -// dprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p", -// priv, &(priv->buckets_out))); -// dprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); +// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p", +// priv, &(priv->buckets_out))); +// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); priv->RcvCtl[ctx].skb = NULL; pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, @@ -989,13 +1015,13 @@ // for (i = 0; i < priv->max_buckets_out; i++) // if (priv->RcvCtl[i].skb != NULL) -// dprintk((KERN_INFO MYNAM "@rpr: bucket %03x " +// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x " // "is still out\n", i)); -/* dprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", +/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", count)); */ -/**/ dprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " +/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " /**/ "remaining, %d received back since sod.\n", /**/ atomic_read(&priv->buckets_out), priv->total_received)); return 0; @@ -1010,9 +1036,9 @@ MPT_ADAPTER *mpt_dev = priv->mpt_dev; struct sk_buff *skb, *old_skb; unsigned long flags; - u32 len, ctx; - u32 offset; - u8 count; + u32 len, ctx, offset; + u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); + int count; int i, l; dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n")); @@ -1059,7 +1085,7 @@ if (!skb) { printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", IOC_AND_NETDEV_NAMES_s_s(dev), - __FILE__, __LINE__); + __FILE__, __LINE__); return -ENOMEM; } @@ -1096,7 +1122,7 @@ if (!skb) { printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! 
(%s@%d)\n", IOC_AND_NETDEV_NAMES_s_s(dev), - __FILE__, __LINE__); + __FILE__, __LINE__); return -ENOMEM; } @@ -1140,25 +1166,32 @@ "Arrgghh! We've done it again!\n"); } -#if 0 - { - u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); - if (remaining < priv->bucketthresh) - mpt_lan_wake_post_buckets_task(dev); - - if (remaining == 0) - printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " - "(priv->buckets_out = %d)\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - atomic_read(&priv->buckets_out)); - else - printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. " - "(priv->buckets_out = %d)\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - remaining, atomic_read(&priv->buckets_out)); + if (remaining == 0) + printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + atomic_read(&priv->buckets_out)); + else if (remaining < 10) + printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. " + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + remaining, atomic_read(&priv->buckets_out)); + + if ((remaining < priv->bucketthresh) && + ((atomic_read(&priv->buckets_out) - remaining) > + MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) { + + printk (KERN_WARNING MYNAM " Mismatch between driver's " + "buckets_out count and fw's BucketsRemaining " + "count has crossed the threshold, issuing a " + "LanReset to clear the fw's hashtable. 
You may " + "want to check your /var/log/messages for \"CRC " + "error\" event notifications.\n"); + + mpt_lan_reset(dev); + mpt_lan_wake_post_buckets_task(dev, 0); } -#endif - + return mpt_lan_receive_skb(dev, skb); } @@ -1242,15 +1275,15 @@ if (skb == NULL) { skb = dev_alloc_skb(len); if (skb == NULL) { -/**/ printk (KERN_WARNING -/**/ MYNAM "/%s: Can't alloc skb\n", -/**/ __FUNCTION__); + printk (KERN_WARNING + MYNAM "/%s: Can't alloc skb\n", + __FUNCTION__); priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; spin_unlock_irqrestore(&priv->rxfidx_lock, flags); break; } - dma = pci_map_single(mpt_dev->pcidev, skb->data, + dma = pci_map_single(mpt_dev->pcidev, skb->data, len, PCI_DMA_FROMDEVICE); priv->RcvCtl[ctx].skb = skb; @@ -1308,7 +1341,7 @@ dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", __FUNCTION__, buckets, atomic_read(&priv->buckets_out))); dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n", - __FUNCTION__, priv->total_posted, priv->total_received)); + __FUNCTION__, priv->total_posted, priv->total_received)); clear_bit(0, &priv->post_buckets_active); } @@ -1336,7 +1369,7 @@ priv->post_buckets_task.data = dev; priv->post_buckets_active = 0; - dprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", + dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", __LINE__, dev->mtu + dev->hard_header_len + 4)); atomic_set(&priv->buckets_out, 0); @@ -1346,7 +1379,7 @@ if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out) priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets; - dprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n", + dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n", __LINE__, mpt_dev->pfacts[0].MaxLanBuckets, max_buckets_out, @@ -1389,7 +1422,7 @@ dev->tx_timeout = mpt_lan_tx_timeout; dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; - dprintk((KERN_INFO MYNAM ": Finished registering dev " + dlprintk((KERN_INFO MYNAM ": Finished registering dev " "and 
setting initial values\n")); SET_MODULE_OWNER(dev); @@ -1407,9 +1440,11 @@ show_mptmod_ver(LANAME, LANVER); - /* Init the global r/w lock for the bad_naa list. We want to do this +#ifdef QLOGIC_NAA_WORKAROUND + /* Init the global r/w lock for the bad_naa list. We want to do this before any boards are initialized and may be used. */ rwlock_init(&bad_naa_lock); +#endif if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) { printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n"); @@ -1419,10 +1454,10 @@ /* Set the callback index to be used by driver core for turbo replies */ mpt_lan_index = LanCtx; - dprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx)); + dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx)); if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) { - dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); + dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); } else { printk(KERN_ERR MYNAM ": Eieee! unable to register a reset " "handler with mptbase! The world is at an end! " @@ -1458,7 +1493,7 @@ // IOC_AND_NETDEV_NAMES_s_s(dev), // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out); mpt_landev[j] = dev; - dprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n", + dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n", dev, j, mpt_landev[j])); j++; @@ -1508,18 +1543,15 @@ MODULE_PARM(tx_max_out_p, "i"); MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME! 
-MODULE_LICENSE("GPL"); - module_init(mpt_lan_init); module_exit(mpt_lan_exit); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static unsigned short -mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) +mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) { struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; struct fcllc *fcllc; - u16 source_naa = fch->stype, found = 0; skb->mac.raw = skb->data; skb_pull(skb, sizeof(struct mpt_lan_ohdr)); @@ -1535,7 +1567,7 @@ printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n", NETDEV_PTR_TO_IOC_NAME_s(dev)); printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", - fch->saddr[0], fch->saddr[1], fch->saddr[2], + fch->saddr[0], fch->saddr[1], fch->saddr[2], fch->saddr[3], fch->saddr[4], fch->saddr[5]); } @@ -1555,6 +1587,10 @@ fcllc = (struct fcllc *)skb->data; +#ifdef QLOGIC_NAA_WORKAROUND +{ + u16 source_naa = fch->stype, found = 0; + /* Workaround for QLogic not following RFC 2625 in regards to the NAA value. 
*/ @@ -1562,15 +1598,15 @@ source_naa = swab16(source_naa); if (fcllc->ethertype == htons(ETH_P_ARP)) - dprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of " + dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of " "%04x.\n", source_naa)); - if ((fcllc->ethertype == htons(ETH_P_ARP)) && + if ((fcllc->ethertype == htons(ETH_P_ARP)) && ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){ struct NAA_Hosed *nh, *prevnh; int i; - dprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from " + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from " "system with non-RFC 2625 NAA value (%04x).\n", source_naa)); @@ -1584,17 +1620,17 @@ (nh->ieee[4] == fch->saddr[4]) && (nh->ieee[5] == fch->saddr[5])) { found = 1; - dprintk ((KERN_INFO "mptlan/type_trans: ARP Re" + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re" "q/Rep w/ bad NAA from system already" " in DB.\n")); break; } } - + if ((!found) && (nh == NULL)) { nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL); - dprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/" + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/" " bad NAA from system not yet in DB.\n")); if (nh != NULL) { @@ -1603,11 +1639,11 @@ mpt_bad_naa = nh; if (prevnh) prevnh->next = nh; - + nh->NAA = source_naa; /* Set the S_NAA value. */ for (i = 0; i < FC_ALEN; i++) nh->ieee[i] = fch->saddr[i]; - dprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:" + dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:" "%02x:%02x with non-compliant S_NAA value.\n", fch->saddr[0], fch->saddr[1], fch->saddr[2], fch->saddr[3], fch->saddr[4],fch->saddr[5])); @@ -1622,9 +1658,10 @@ } write_unlock_irq(&bad_naa_lock); } - +} +#endif - /* Strip the SNAP header from ARP packets since we don't + /* Strip the SNAP header from ARP packets since we don't * pass them through to the 802.2/SNAP layers. 
*/ if (fcllc->dsap == EXTENDED_SAP && diff -Nru a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h --- a/drivers/message/fusion/mptlan.h Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/mptlan.h Fri Apr 26 00:01:27 2002 @@ -21,6 +21,7 @@ #include #include #include +#include // #include #include @@ -43,13 +44,15 @@ #define MPT_LAN_MAX_BUCKETS_OUT 256 #define MPT_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */ +#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10 #define MPT_LAN_RX_COPYBREAK 200 -#define MPT_LAN_TX_TIMEOUT (1*HZ) +#define MPT_LAN_TX_TIMEOUT (1*HZ) #define MPT_TX_MAX_OUT_LIM 127 #define MPT_LAN_MIN_MTU 96 /* RFC2625 */ #define MPT_LAN_MAX_MTU 65280 /* RFC2625 */ -#define MPT_LAN_MTU 16128 /* be nice to slab allocator */ +#define MPT_LAN_MTU 13312 /* Max perf range + lower mem + usage than 16128 */ #define MPT_LAN_NAA_RFC2625 0x1 #define MPT_LAN_NAA_QLOGIC 0x2 @@ -64,6 +67,12 @@ #define dioprintk(x) printk x #else #define dioprintk(x) +#endif + +#ifdef MPT_LAN_DEBUG +#define dlprintk(x) printk x +#else +#define dlprintk(x) #endif #define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)(d)->priv) diff -Nru a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c --- a/drivers/message/fusion/mptscsih.c Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/mptscsih.c Fri Apr 26 00:01:27 2002 @@ -9,17 +9,24 @@ * This driver would not exist if not for Alan Cox's development * of the linux i2o driver. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. + * * A huge debt of gratitude is owed to David S. Miller (DaveM) * for fixing much of the stupid and broken stuff in the early * driver while porting to sparc64 platform. THANK YOU! 
* * (see mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Original author: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptscsih.c,v 1.29.4.1 2001/09/18 03:22:30 sralston Exp $ + * $Id: mptscsih.c,v 1.80 2002/02/27 18:44:27 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -65,7 +72,10 @@ #include #include #include -#include +#include /* for io_request_lock (spinlock) decl */ +#include /* for mdelay */ +#include /* needed for in_interrupt() proto */ +#include /* notifier code */ #include "../../scsi/scsi.h" #include "../../scsi/hosts.h" #include "../../scsi/sd.h" @@ -83,52 +93,131 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); +/* Set string for command line args from insmod */ +#ifdef MODULE +char *mptscsih = 0; +MODULE_PARM(mptscsih, "s"); +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ typedef struct _BIG_SENSE_BUF { - u8 data[256]; + u8 data[MPT_SENSE_BUFFER_ALLOC]; } BIG_SENSE_BUF; -typedef struct _MPT_SCSI_HOST { - MPT_ADAPTER *ioc; - int port; - struct scsi_cmnd **ScsiLookup; - u8 *SgHunks; - dma_addr_t SgHunksDMA; - u32 qtag_tick; -} MPT_SCSI_HOST; - -typedef struct _MPT_SCSI_DEV { - struct _MPT_SCSI_DEV *forw; - struct _MPT_SCSI_DEV *back; - MPT_ADAPTER *ioc; - int sense_sz; - BIG_SENSE_BUF CachedSense; - unsigned long io_cnt; - unsigned long read_cnt; -} MPT_SCSI_DEV; +#define MPT_SCANDV_GOOD (0x00000000) /* must be 0 */ +#define MPT_SCANDV_DID_RESET (0x00000001) +#define MPT_SCANDV_SENSE (0x00000002) +#define MPT_SCANDV_SOME_ERROR (0x00000004) +#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) + +#define MPT_SCANDV_MAX_RETRIES (10) + +#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */ +#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */ +#define MPT_ICFLAG_PHYS_DISK 
0x04 /* Any SCSI IO but do Phys Disk Format */ +#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */ +#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occured with this command */ +#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */ + +typedef struct _internal_cmd { + char *data; /* data pointer */ + dma_addr_t data_dma; /* data dma address */ + int size; /* transfer size */ + u8 cmd; /* SCSI Op Code */ + u8 bus; /* bus number */ + u8 id; /* SCSI ID (virtual) */ + u8 lun; + u8 flags; /* Bit Field - See above */ + u8 physDiskNum; /* Phys disk number, -1 else */ + u8 rsvd2; + u8 rsvd; +} INTERNAL_CMD; + +typedef struct _negoparms { + u8 width; + u8 offset; + u8 factor; + u8 flags; +} NEGOPARMS; + +typedef struct _dv_parameters { + NEGOPARMS max; + NEGOPARMS now; + u8 cmd; + u8 id; + u16 pad1; +} DVPARAMETERS; + /* * Other private/forward protos... */ - static int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static void mptscsih_report_queue_full(Scsi_Cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq); static int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static int mptscsih_io_direction(Scsi_Cmnd *cmd); + +static int mptscsih_Add32BitSGE(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt, + SCSIIORequest_t *pReq, int req_idx); +static void mptscsih_AddNullSGE(SCSIIORequest_t *pReq); +static int mptscsih_getFreeChainBuffer(MPT_SCSI_HOST *hd, int *retIndex); +static void mptscsih_freeChainBuffers(MPT_SCSI_HOST *hd, int req_idx); +static int mptscsih_initChainBuffers (MPT_SCSI_HOST *hd, int init); + static void copy_sense_data(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); -static u32 SCPNT_TO_MSGCTX(Scsi_Cmnd *sc); +#ifndef MPT_SCSI_USE_NEW_EH +static void search_taskQ_for_cmd(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd); +#endif +static u32 SCPNT_TO_LOOKUP_IDX(Scsi_Cmnd *sc); +static MPT_FRAME_HDR *mptscsih_search_pendingQ(MPT_SCSI_HOST *hd, int scpnt_idx); +static void 
post_pendingQ_commands(MPT_SCSI_HOST *hd); + +static int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag); +static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag); static int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); static int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static VirtDevice *mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen); +void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target); +static void clear_sense_flag(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq); +static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq, char *data); +static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags); +static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags); +static int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); +static void mptscsih_timer_expired(unsigned long data); +static void mptscsih_taskmgmt_timeout(unsigned long data); +static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); +static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum); + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io); +static void mptscsih_domainValidation(void *hd); +static void mptscsih_doDv(MPT_SCSI_HOST *hd, int portnum, int target); +static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage); +static void mptscsih_fillbuf(char *buffer, int size, int index, int width); +#endif +static int mptscsih_setup(char *str); +static int mptscsih_halt(struct notifier_block *nb, ulong event, void *buf); + +/* + * Reboot Notification + */ +static struct notifier_block mptscsih_notifier = { + mptscsih_halt, NULL, 0 +}; + +/* + * 
Private data... + */ static int mpt_scsi_hosts = 0; static atomic_t queue_depth; static int ScsiDoneCtx = -1; static int ScsiTaskCtx = -1; +static int ScsiScanDvCtx = -1; /* Used only for bus scan and dv */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,28) static struct proc_dir_entry proc_mpt_scsihost = @@ -141,23 +230,40 @@ }; #endif -#define SNS_LEN(scp) sizeof((scp)->sense_buffer) +#define SNS_LEN(scp) sizeof((scp)->sense_buffer) #ifndef MPT_SCSI_USE_NEW_EH /* * Stuff to handle single-threading SCSI TaskMgmt * (abort/reset) requests... */ -static spinlock_t mpt_scsih_taskQ_lock = SPIN_LOCK_UNLOCKED; -static MPT_Q_TRACKER mpt_scsih_taskQ = { - (MPT_FRAME_HDR*) &mpt_scsih_taskQ, - (MPT_FRAME_HDR*) &mpt_scsih_taskQ -}; -static int mpt_scsih_taskQ_cnt = 0; -static int mpt_scsih_taskQ_bh_active = 0; -static MPT_FRAME_HDR *mpt_scsih_active_taskmgmt_mf = NULL; +static spinlock_t mytaskQ_lock = SPIN_LOCK_UNLOCKED; +static int mytaskQ_bh_active = 0; +static struct tq_struct mptscsih_ptaskfoo; +static atomic_t mpt_taskQdepth; +#endif + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/* + * Domain Validation task structure + */ +static spinlock_t dvtaskQ_lock = SPIN_LOCK_UNLOCKED; +static int dvtaskQ_active = 0; +static int dvtaskQ_release = 0; +static struct tq_struct mptscsih_dvTask; #endif +/* + * Wait Queue setup + */ +static DECLARE_WAIT_QUEUE_HEAD (scandv_waitq); +static int scandv_wait_done = 1; + +/* Driver default setup + */ +static struct mptscsih_driver_setup + driver_setup = MPTSCSIH_DRIVER_SETUP; + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptscsih_io_done - Main SCSI IO callback routine registered to @@ -174,123 +280,109 @@ * Returns 1 indicating alloc'd request frame ptr should be freed. 
*/ static int -mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r) +mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { Scsi_Cmnd *sc; MPT_SCSI_HOST *hd; - MPT_SCSI_DEV *mpt_sdev = NULL; + SCSIIORequest_t *pScsiReq; + SCSIIOReply_t *pScsiReply; + unsigned long flags; u16 req_idx; + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if ((mf == NULL) || (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": ERROR! NULL or BAD req frame ptr (=%p)!\n", mf); - return 1; + printk(MYIOC_s_ERR_FMT "%s req frame ptr! (=%p)!\n", + ioc->name, mf?"BAD":"NULL", mf); + /* return 1; CHECKME SteveR. Don't free. */ + return 0; } - hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sc = hd->ScsiLookup[req_idx]; - hd->ScsiLookup[req_idx] = NULL; + if (sc == NULL) { + MPIHeader_t *hdr = (MPIHeader_t *)mf; - dmfprintk((KERN_INFO MYNAM ": ScsiDone (req:sc:reply=%p:%p:%p)\n", mf, sc, r)); + atomic_dec(&queue_depth); - atomic_dec(&queue_depth); + /* writeSDP1 will use the ScsiDoneCtx + * There is no processing for the reply. + * Just return to the calling function. + */ + if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST) + printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n", ioc->name); - /* - * Check for {1st} {IO} completion to "new" device. - * How do we know it's a new device? - * If we haven't set SDpnt->hostdata I guess... 
- */ - if (sc && sc->device) { - mpt_sdev = (MPT_SCSI_DEV*)sc->device->hostdata; - if (!mpt_sdev) { - dprintk((KERN_INFO MYNAM ": *NEW* SCSI device (%d:%d:%d)!\n", - sc->device->id, sc->device->lun, sc->device->channel)); - if ((sc->device->hostdata = kmalloc(sizeof(MPT_SCSI_DEV), GFP_ATOMIC)) == NULL) { - printk(KERN_ERR MYNAM ": ERROR - kmalloc(%d) FAILED!\n", (int)sizeof(MPT_SCSI_DEV)); - } else { - memset(sc->device->hostdata, 0, sizeof(MPT_SCSI_DEV)); - mpt_sdev = (MPT_SCSI_DEV *) sc->device->hostdata; - mpt_sdev->ioc = ioc; - } - } else { - if (++mpt_sdev->io_cnt && mptscsih_io_direction(sc) < 0) { - if (++mpt_sdev->read_cnt == 3) { - dprintk((KERN_INFO MYNAM ": 3rd DATA_IN, CDB[0]=%02x\n", - sc->cmnd[0])); - } - } -#if 0 - if (mpt_sdev->sense_sz) { - /* - * Completion of first IO down this path - * *should* invalidate device SenseData... - */ - mpt_sdev->sense_sz = 0; - } -#endif - } + mptscsih_freeChainBuffers(hd, req_idx); + return 1; } -#if 0 -{ - MPT_FRAME_HDR *mf_chk; + dmfprintk((MYIOC_s_INFO_FMT "ScsiDone (mf=%p,mr=%p,sc=%p)\n", + ioc->name, mf, mr, sc)); - /* This, I imagine, is a costly check, but... - * If abort/reset active, check to see if this is a IO - * that completed while ABORT/RESET for it is waiting - * on our taskQ! - */ - if (! Q_IS_EMPTY(&mpt_scsih_taskQ)) { - /* If ABORT for this IO is queued, zap it! 
*/ - mf_chk = search_taskQ(1,sc,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); - if (mf_chk != NULL) { - sc->result = DID_ABORT << 16; - spin_lock_irqsave(sc->host->host_lock, flags); - sc->scsi_done(sc); - spin_unlock_irqrestore(sc->host->host_lock, flags); - return 1; - } - } -} -#endif + atomic_dec(&queue_depth); - if (r != NULL && sc != NULL) { - SCSIIOReply_t *pScsiReply; - SCSIIORequest_t *pScsiReq; - u16 status; + sc->result = DID_OK << 16; /* Set default reply as OK */ + pScsiReq = (SCSIIORequest_t *) mf; + pScsiReply = (SCSIIOReply_t *) mr; + + if (pScsiReply == NULL) { + /* special context reply handling */ - pScsiReply = (SCSIIOReply_t *) r; - pScsiReq = (SCSIIORequest_t *) mf; + /* If regular Inquiry cmd - save inquiry data + */ + if (pScsiReq->CDB[0] == INQUIRY && !(pScsiReq->CDB[1] & 0x3)) { + int dlen; + + dlen = le32_to_cpu(pScsiReq->DataLength); + if (dlen >= SCSI_STD_INQUIRY_BYTES) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + dlen); + } + } + clear_sense_flag(hd, pScsiReq); + + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); + } else { + u32 xfer_cnt; + u16 status; + u8 scsi_state; status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK; + scsi_state = pScsiReply->SCSIState; - dprintk((KERN_NOTICE MYNAM ": Uh-Oh! (req:sc:reply=%p:%p:%p)\n", mf, sc, r)); + dprintk((KERN_NOTICE " Uh-Oh! (%d:%d:%d) mf=%p, mr=%p, sc=%p\n", + ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1], + mf, mr, sc)); dprintk((KERN_NOTICE " IOCStatus=%04xh, SCSIState=%02xh" - ", SCSIStatus=%02xh, IOCLogInfo=%08xh\n", - status, pScsiReply->SCSIState, pScsiReply->SCSIStatus, - le32_to_cpu(pScsiReply->IOCLogInfo))); + ", SCSIStatus=%02xh, IOCLogInfo=%08xh\n", + status, scsi_state, pScsiReply->SCSIStatus, + le32_to_cpu(pScsiReply->IOCLogInfo))); + + if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) + copy_sense_data(sc, hd, mf, pScsiReply); /* * Look for + dump FCP ResponseInfo[]! 
*/ - if (pScsiReply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { + if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { dprintk((KERN_NOTICE " FCP_ResponseInfo=%08xh\n", le32_to_cpu(pScsiReply->ResponseInfo))); } switch(status) { case MPI_IOCSTATUS_BUSY: /* 0x0002 */ - /*sc->result = DID_BUS_BUSY << 16;*/ /* YIKES! - Seems to - * kill linux interrupt - * handler - */ - sc->result = STS_BUSY; /* Try SCSI BUSY! */ - break; - - case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ - /* Not real sure here... */ - sc->result = DID_OK << 16; + /* CHECKME! + * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry) + * But not: DID_BUS_BUSY lest one risk + * killing interrupt handler:-( + */ + sc->result = STS_BUSY; break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */ @@ -299,10 +391,29 @@ break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ - /* Spoof to SCSI Selection Timeout! */ + /* Spoof to SCSI Selection Timeout! */ sc->result = DID_NO_CONNECT << 16; break; + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ +#ifndef MPT_SCSI_USE_NEW_EH + search_taskQ_for_cmd(sc, hd); +#endif + /* Linux handles an unsolicited DID_RESET better + * than an unsolicited DID_ABORT. + */ + sc->result = DID_RESET << 16; + break; + + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ +#ifndef MPT_SCSI_USE_NEW_EH + search_taskQ_for_cmd(sc, hd); +#endif + sc->result = DID_RESET << 16; + break; + + case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ /* * YIKES! I just discovered that SCSI IO which @@ -312,78 +423,148 @@ * Do upfront check for valid SenseData and give it * precedence! 
*/ + sc->result = (DID_OK << 16) | pScsiReply->SCSIStatus; + clear_sense_flag(hd, pScsiReq); if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - copy_sense_data(sc, hd, mf, pScsiReply); - sc->result = pScsiReply->SCSIStatus; - break; + /* Have already saved the status and sense data + */ + ; + } else if (pScsiReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { + /* What to do? + */ + sc->result = DID_SOFT_ERROR << 16; + } + else if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + /* Not real sure here either... */ + sc->result = DID_RESET << 16; } - dprintk((KERN_NOTICE MYNAM ": sc->underflow={report ERR if < %02xh bytes xfer'd}\n", sc->underflow)); - dprintk((KERN_NOTICE MYNAM ": ActBytesXferd=%02xh\n", le32_to_cpu(pScsiReply->TransferCount))); + /* Give report and update residual count. + */ + xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); + dprintk((KERN_NOTICE " sc->underflow={report ERR if < %02xh bytes xfer'd}\n", + sc->underflow)); + dprintk((KERN_NOTICE " ActBytesXferd=%02xh\n", xfer_cnt)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) - sc->resid = sc->request_bufflen - le32_to_cpu(pScsiReply->TransferCount); - dprintk((KERN_NOTICE MYNAM ": SET sc->resid=%02xh\n", sc->resid)); + sc->resid = sc->request_bufflen - xfer_cnt; + dprintk((KERN_NOTICE " SET sc->resid=%02xh\n", sc->resid)); #endif - if (pScsiReq->CDB[0] == INQUIRY) { - sc->result = (DID_OK << 16); - break; - } + /* Report Queue Full + */ + if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL) + mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); - /* workaround attempts... */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) - if (sc->resid >= 0x200) { - /* GRRRRR... 
- * //sc->result = DID_SOFT_ERROR << 16; - * Try spoofing to BUSY - */ - sc->result = STS_BUSY; - } else { - sc->result = 0; + /* If regular Inquiry cmd and some data was transferred, + * save inquiry data + */ + if ( pScsiReq->CDB[0] == INQUIRY + && !(pScsiReq->CDB[1] & 0x3) + && xfer_cnt >= SCSI_STD_INQUIRY_BYTES + ) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + xfer_cnt); } -#else - sc->result = 0; -#endif - break; - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ - sc->result = DID_ABORT << 16; - break; - - case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ - sc->result = DID_RESET << 16; + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); break; + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ - sc->result = pScsiReply->SCSIStatus; + sc->result = (DID_OK << 16) | pScsiReply->SCSIStatus; + clear_sense_flag(hd, pScsiReq); if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - copy_sense_data(sc, hd, mf, pScsiReply); - - /* If running agains circa 200003dd 909 MPT f/w, - * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL - * (QUEUE_FULL) returned from device! --> get 0x0000?128 - * and with SenseBytes set to 0. + /* + * If running agains circa 200003dd 909 MPT f/w, + * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL + * (QUEUE_FULL) returned from device! --> get 0x0000?128 + * and with SenseBytes set to 0. */ if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL) mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); + +#ifndef MPT_SCSI_USE_NEW_EH + /* ADDED 20011120 -sralston + * Scsi mid-layer (old_eh) doesn't seem to like it + * when RAID returns SCSIStatus=02 (CHECK CONDITION), + * SenseKey=01 (RECOVERED ERROR), ASC/ASCQ=95/01. + * Seems to be * treating this as a IO error:-( + * + * So just lie about it altogether here. 
+ * + * NOTE: It still gets reported to syslog via + * mpt_ScsiHost_ErrorReport from copy_sense_data + * call far above. + */ + if ( pScsiReply->SCSIStatus == STS_CHECK_CONDITION + && SD_Sense_Key(sc->sense_buffer) == SK_RECOVERED_ERROR + ) { + sc->result = 0; + } +#endif + } - else if (pScsiReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { + else if (pScsiReply->SCSIState & + (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS) + ) { /* - * What to do? + * What to do? */ sc->result = DID_SOFT_ERROR << 16; } else if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { /* Not real sure here either... */ - sc->result = DID_ABORT << 16; + sc->result = DID_RESET << 16; + } + else if (pScsiReply->SCSIState & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) { + /* Device Inq. data indicates that it supports + * QTags, but rejects QTag messages. + * This command completed OK. + * + * Not real sure here either so do nothing... */ } if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL) mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); + /* Add handling of: + * Reservation Conflict, Busy, + * Command Terminated, CHECK + */ + + /* If regular Inquiry cmd - save inquiry data + */ + xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); + if ( sc->result == (DID_OK << 16) + && pScsiReq->CDB[0] == INQUIRY + && !(pScsiReq->CDB[1] & 0x3) + && xfer_cnt >= SCSI_STD_INQUIRY_BYTES + ) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + xfer_cnt); + } + + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + /* Not real sure here either... 
*/ + sc->result = DID_RESET << 16; + } else + sc->result = DID_SOFT_ERROR << 16; break; case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ @@ -395,50 +576,50 @@ case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */ case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ - case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ - case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */ default: /* - * What to do? + * What to do? */ sc->result = DID_SOFT_ERROR << 16; break; } /* switch(status) */ - dprintk((KERN_NOTICE MYNAM ": sc->result set to %08xh\n", sc->result)); + dprintk((KERN_NOTICE " sc->result set to %08xh\n", sc->result)); + } /* end of address reply case */ + + /* Unmap the DMA buffers, if any. */ + if (sc->use_sg) { + pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer, + sc->use_sg, scsi_to_pci_dma_dir(sc->sc_data_direction)); + } else if (sc->request_bufflen) { + scPrivate *my_priv; + + my_priv = (scPrivate *) &sc->SCp; + pci_unmap_single(ioc->pcidev, (dma_addr_t)(ulong)my_priv->p1, + sc->request_bufflen, + scsi_to_pci_dma_dir(sc->sc_data_direction)); } - if (sc != NULL) { - unsigned long flags; + hd->ScsiLookup[req_idx] = NULL; - /* Unmap the DMA buffers, if any. */ - if (sc->use_sg) { - pci_unmap_sg(ioc->pcidev, - (struct scatterlist *) sc->request_buffer, - sc->use_sg, - scsi_to_pci_dma_dir(sc->sc_data_direction)); - } else if (sc->request_bufflen) { - pci_unmap_single(ioc->pcidev, - (dma_addr_t)((long)sc->SCp.ptr), - sc->request_bufflen, - scsi_to_pci_dma_dir(sc->sc_data_direction)); - } + sc->host_scribble = NULL; /* CHECKME! - Do we need to clear this??? 
*/ - spin_lock_irqsave(sc->host->host_lock, flags); - sc->scsi_done(sc); - spin_unlock_irqrestore(sc->host->host_lock, flags); - } + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue the command callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + /* Free Chain buffers */ + mptscsih_freeChainBuffers(hd, req_idx); return 1; } -#ifndef MPT_SCSI_USE_NEW_EH +#ifndef MPT_SCSI_USE_NEW_EH /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * search_taskQ - Search SCSI task mgmt request queue for specific - * request type + * request type. * @remove: (Boolean) Should request be removed if found? * @sc: Pointer to Scsi_Cmnd structure * @task_type: Task type to search for @@ -447,42 +628,55 @@ * was not found. */ static MPT_FRAME_HDR * -search_taskQ(int remove, Scsi_Cmnd *sc, u8 task_type) +search_taskQ(int remove, Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, u8 task_type) { MPT_FRAME_HDR *mf = NULL; unsigned long flags; int count = 0; int list_sz; - dslprintk((KERN_INFO MYNAM ": spinlock#1\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - list_sz = mpt_scsih_taskQ_cnt; - if (! Q_IS_EMPTY(&mpt_scsih_taskQ)) { - mf = mpt_scsih_taskQ.head; + dprintk((KERN_INFO MYNAM ": search_taskQ(%d,sc=%p,%d) called\n", + remove, sc, task_type)); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + list_sz = hd->taskQcnt; + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; do { count++; if (mf->u.frame.linkage.argp1 == sc && mf->u.frame.linkage.arg1 == task_type) { if (remove) { Q_DEL_ITEM(&mf->u.frame.linkage); - mpt_scsih_taskQ_cnt--; + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + + /* Don't save mf into nextmf because + * exit after command has been deleted. 
+ */ + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&hd->ioc->FreeQ, + &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; +#endif } break; } - } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&mpt_scsih_taskQ); - if (mf == (MPT_FRAME_HDR*)&mpt_scsih_taskQ) { + } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&hd->taskQ); + if (mf == (MPT_FRAME_HDR*)&hd->taskQ) { mf = NULL; } } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); if (list_sz) { - dprintk((KERN_INFO MYNAM ": search_taskQ(%d,%p,%d) results=%p (%sFOUND%s)!\n", - remove, sc, task_type, + dprintk((KERN_INFO " Results=%p (%sFOUND%s)!\n", mf, mf ? "" : "NOT_", (mf && remove) ? "+REMOVED" : "" )); - dprintk((KERN_INFO MYNAM ": (searched thru %d of %d items on taskQ)\n", + dprintk((KERN_INFO " (searched thru %d of %d items on taskQ)\n", count, list_sz )); } @@ -490,12 +684,336 @@ return mf; } +/* + * clean_taskQ - Clean the SCSI task mgmt request for + * this SCSI host instance. + * @hd: MPT_SCSI_HOST pointer + * + * Returns: None. + */ +static void +clean_taskQ(MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf = NULL; + MPT_FRAME_HDR *nextmf = NULL; + MPT_ADAPTER *ioc = hd->ioc; + unsigned long flags; + + dprintk((KERN_INFO MYNAM ": clean_taskQ called\n")); + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; + do { + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + + nextmf = mf->u.frame.linkage.forw; + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&ioc->FreeQ, &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; +#endif + } while ((mf = nextmf) != (MPT_FRAME_HDR*)&hd->taskQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + return; +} + +/* + * search_taskQ_for_cmd - Search the SCSI task mgmt request queue for + * the specified command. 
If found, delete + * @hd: MPT_SCSI_HOST pointer + * + * Returns: None. + */ +static void +search_taskQ_for_cmd(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf = NULL; + unsigned long flags; + int count = 0; + + dprintk((KERN_INFO MYNAM ": search_taskQ_for_cmd(sc=%p) called\n", sc)); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; + do { + count++; + if (mf->u.frame.linkage.argp1 == sc) { + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + dprintk((KERN_INFO MYNAM + ": Cmd %p found! Deleting.\n", sc)); + + /* Don't save mf into nextmf because + * exit after command has been deleted. + */ + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&hd->ioc->FreeQ, + &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; #endif + break; + } + } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&hd->taskQ); + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + return; +} + +#endif /* } MPT_SCSI_USE_NEW_EH */ + + +/* + * Flush all commands on the doneQ. + * Lock Q when deleting/adding members + * Lock io_request_lock for OS callback. + */ +static void +flush_doneQ(MPT_SCSI_HOST *hd) +{ + MPT_DONE_Q *buffer; + Scsi_Cmnd *SCpnt; + unsigned long flags; + + /* Flush the doneQ. + */ + dprintk((KERN_INFO MYNAM ": flush_doneQ called\n")); + while (1) { + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (Q_IS_EMPTY(&hd->doneQ)) { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + break; + } + + buffer = hd->doneQ.head; + /* Delete from Q + */ + Q_DEL_ITEM(buffer); + + /* Set the Scsi_Cmnd pointer + */ + SCpnt = (Scsi_Cmnd *) buffer->argp; + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + + /* Do the OS callback. 
+ */ + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(SCpnt->host->host_lock, flags); + } + + return; +} + +/* + * Search the doneQ for a specific command. If found, delete from Q. + * Calling function will finish processing. + */ +static void +search_doneQ_for_cmd(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt) +{ + unsigned long flags; + MPT_DONE_Q *buffer; + + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->doneQ)) { + buffer = hd->doneQ.head; + do { + Scsi_Cmnd *sc = (Scsi_Cmnd *) buffer->argp; + if (SCpnt == sc) { + Q_DEL_ITEM(buffer); + SCpnt->result = sc->result; + + /* Set the Scsi_Cmnd pointer + */ + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + break; + } + } while ((buffer = buffer->forw) != (MPT_DONE_Q *) &hd->doneQ); + } + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + return; +} + +/* + * mptscsih_flush_running_cmds - For each command found, search + * Scsi_Host instance taskQ and reply to OS. + * Called only if recovering from a FW reload. + * @hd: Pointer to a SCSI HOST structure + * + * Returns: None. + * + * Must be called while new I/Os are being queued. + */ +static void +mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) +{ + Scsi_Cmnd *SCpnt = NULL; + MPT_FRAME_HDR *mf = NULL; + int ii; + int max = hd->ioc->req_depth; + unsigned long flags; + + dprintk((KERN_INFO MYNAM ": flush_ScsiLookup called\n")); + for (ii= 0; ii < max; ii++) { + if ((SCpnt = hd->ScsiLookup[ii]) != NULL) { + + /* Command found. + */ + +#ifndef MPT_SCSI_USE_NEW_EH + /* Search taskQ, if found, delete. + */ + search_taskQ_for_cmd(SCpnt, hd); +#endif + + /* Search pendingQ, if found, + * delete from Q. If found, do not decrement + * queue_depth, command never posted. 
+ */ + if (mptscsih_search_pendingQ(hd, ii) == NULL) + atomic_dec(&queue_depth); + + /* Null ScsiLookup index + */ + hd->ScsiLookup[ii] = NULL; + + mf = MPT_INDEX_2_MFPTR(hd->ioc, ii); + dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", + mf, SCpnt)); + + /* Set status + * Do OS callback + * Free chain buffers + * Free message frame + */ + SCpnt->result = DID_RESET << 16; + SCpnt->host_scribble = NULL; + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); /* Issue the command callback */ + spin_unlock_irqrestore(SCpnt->host->host_lock, flags); + + /* Free Chain buffers */ + mptscsih_freeChainBuffers(hd, ii); + + /* Free Message frames */ + mpt_free_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); + } + } + return; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_initChainBuffers - Allocate memory for and initialize + * chain buffers, chain buffer control arrays and spinlock. + * @hd: Pointer to MPT_SCSI_HOST structure + * @init: If set, initialize the spin lock. 
+ */ +static int +mptscsih_initChainBuffers (MPT_SCSI_HOST *hd, int init) +{ + MPT_FRAME_HDR *chain; + u8 *mem; + unsigned long flags; + int sz, ii, numChain; + + + /* Chain buffer allocations + * Allocate and initialize tracker structures + */ + if (hd->ioc->req_sz <= 64) + numChain = MPT_SG_REQ_64_SCALE * hd->ioc->req_depth; + else if (hd->ioc->req_sz <= 96) + numChain = MPT_SG_REQ_96_SCALE * hd->ioc->req_depth; + else + numChain = MPT_SG_REQ_128_SCALE * hd->ioc->req_depth; + + sz = numChain * sizeof(int); + + if (hd->ReqToChain == NULL) { + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -1; + + hd->ReqToChain = (int *) mem; + } else { + mem = (u8 *) hd->ReqToChain; + } + memset(mem, 0xFF, sz); + + if (hd->ChainToChain == NULL) { + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -1; + + hd->ChainToChain = (int *) mem; + } else { + mem = (u8 *) hd->ChainToChain; + } + memset(mem, 0xFF, sz); + + if (hd->ChainBuffer == NULL) { + /* Allocate free chain buffer pool + */ + sz = numChain * hd->ioc->req_sz; + mem = pci_alloc_consistent(hd->ioc->pcidev, sz, &hd->ChainBufferDMA); + if (mem == NULL) + return -1; + + hd->ChainBuffer = (u8*)mem; + } else { + mem = (u8 *) hd->ChainBuffer; + } + memset(mem, 0, sz); + + dprintk((KERN_INFO " ChainBuffer @ %p(%p), sz=%d\n", + hd->ChainBuffer, (void *)(ulong)hd->ChainBufferDMA, sz)); + + /* Initialize the free chain Q. + */ + if (init) { + spin_lock_init(&hd->FreeChainQlock); + } + + spin_lock_irqsave (&hd->FreeChainQlock, flags); + Q_INIT(&hd->FreeChainQ, MPT_FRAME_HDR); + + /* Post the chain buffers to the FreeChainQ. + */ + mem = (u8 *)hd->ChainBuffer; + for (ii=0; ii < numChain; ii++) { + chain = (MPT_FRAME_HDR *) mem; + Q_ADD_TAIL(&hd->FreeChainQ.head, &chain->u.frame.linkage, MPT_FRAME_HDR); + mem += hd->ioc->req_sz; + } + spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + + return 0; +} +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * Hack! 
I'd like to report if a device is returning QUEUE_FULL + * Hack! It might be nice to report if a device is returning QUEUE_FULL * but maybe not each and every time... */ static long last_queue_full = 0; @@ -518,8 +1036,12 @@ long time = jiffies; if (time - last_queue_full > 10 * HZ) { - printk(KERN_WARNING MYNAM ": Device reported QUEUE_FULL! SCSI bus:target:lun = %d:%d:%d\n", - 0, sc->target, sc->lun); + char *ioc_str = "ioc?"; + + if (sc->host && sc->host->hostdata) + ioc_str = ((MPT_SCSI_HOST *)sc->host->hostdata)->ioc->name; + printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n", + ioc_str, 0, sc->target, sc->lun); last_queue_full = time; } } @@ -527,7 +1049,7 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int BeenHereDoneThat = 0; -/* SCSI fops start here... */ +/* SCSI host fops start here... */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_detect - Register MPT adapter(s) as SCSI host(s) with @@ -546,25 +1068,22 @@ struct Scsi_Host *sh = NULL; MPT_SCSI_HOST *hd = NULL; MPT_ADAPTER *this; + MPT_DONE_Q *freedoneQ; unsigned long flags; - int sz; + int sz, ii; + int numSGE = 0; + int scale; u8 *mem; if (! 
BeenHereDoneThat++) { show_mptmod_ver(my_NAME, my_VERSION); - if ((ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER)) <= 0) { - printk(KERN_ERR MYNAM ": Failed to register callback1 with MPT base driver\n"); - return mpt_scsi_hosts; - } - if ((ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER)) <= 0) { - printk(KERN_ERR MYNAM ": Failed to register callback2 with MPT base driver\n"); - return mpt_scsi_hosts; - } + ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER); + ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER); + ScsiScanDvCtx = mpt_register(mptscsih_scandv_complete, MPTSCSIH_DRIVER); #ifndef MPT_SCSI_USE_NEW_EH - Q_INIT(&mpt_scsih_taskQ, MPT_FRAME_HDR); - spin_lock_init(&mpt_scsih_taskQ_lock); + spin_lock_init(&mytaskQ_lock); #endif if (mpt_event_register(ScsiDoneCtx, mptscsih_event_process) == 0) { @@ -579,106 +1098,263 @@ /* FIXME! */ } } - dprintk((KERN_INFO MYNAM ": mpt_scsih_detect()\n")); +#ifdef MODULE + /* Evaluate the command line arguments, if any */ + if (mptscsih) + mptscsih_setup(mptscsih); +#endif +#ifndef MPT_SCSI_USE_NEW_EH + atomic_set(&mpt_taskQdepth, 0); +#endif + this = mpt_adapter_find_first(); while (this != NULL) { - /* FIXME! Multi-port (aka FC929) support... - * for (i = 0; i < this->facts.NumberOfPorts; i++) - */ + int portnum; + for (portnum=0; portnum < this->facts.NumberOfPorts; portnum++) { - /* 20010215 -sralston - * Added sanity check on SCSI Initiator-mode enabled - * for this MPT adapter. - */ - if (!(this->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) { - printk(KERN_ERR MYNAM ": Skipping %s because SCSI Initiator mode is NOT enabled!\n", - this->name); - this = mpt_adapter_find_next(this); - continue; - } + /* 20010215 -sralston + * Added sanity check on SCSI Initiator-mode enabled + * for this MPT adapter. 
+ */ + if (!(this->pfacts[portnum].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) { + printk(MYIOC_s_WARN_FMT "Skipping because SCSI Initiator mode is NOT enabled!\n", + this->name); + continue; + } - /* 20010202 -sralston - * Added sanity check on readiness of the MPT adapter. - */ - if (this->last_state != MPI_IOC_STATE_OPERATIONAL) { - printk(KERN_ERR MYNAM ": ERROR - Skipping %s because it's not operational!\n", - this->name); - this = mpt_adapter_find_next(this); - continue; - } + /* 20010202 -sralston + * Added sanity check on readiness of the MPT adapter. + */ + if (this->last_state != MPI_IOC_STATE_OPERATIONAL) { + printk(MYIOC_s_WARN_FMT "Skipping because it's not operational!\n", + this->name); + continue; + } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) - tpnt->proc_dir = &proc_mpt_scsihost; + tpnt->proc_dir = &proc_mpt_scsihost; #endif - sh = scsi_register(tpnt, sizeof(MPT_SCSI_HOST)); - if (sh != NULL) { - save_flags(flags); - cli(); - sh->io_port = 0; - sh->n_io_port = 0; - sh->irq = 0; - - /* Yikes! This is important! - * Otherwise, by default, linux only scans target IDs 0-7! - * - * BUG FIX! 20010618 -sralston & pdelaney - * FC919 testing was encountering "duplicate" FC devices, - * as it turns out because the 919 was returning 512 - * for PortFacts.MaxDevices, causing a wraparound effect - * in SCSI IO requests. So instead of using: - * sh->max_id = this->pfacts[0].MaxDevices - 1 - * we'll use a definitive max here. - */ - sh->max_id = MPT_MAX_FC_DEVICES; - - sh->this_id = this->pfacts[0].PortSCSIID; - - restore_flags(flags); - - hd = (MPT_SCSI_HOST *) sh->hostdata; - hd->ioc = this; - hd->port = 0; /* FIXME! */ - - /* SCSI needs Scsi_Cmnd lookup table! - * (with size equal to req_depth*PtrSz!) 
- */ - sz = hd->ioc->req_depth * sizeof(void *); - mem = kmalloc(sz, GFP_KERNEL); - if (mem == NULL) - return mpt_scsi_hosts; - - memset(mem, 0, sz); - hd->ScsiLookup = (struct scsi_cmnd **) mem; - - dprintk((KERN_INFO MYNAM ": ScsiLookup @ %p, sz=%d\n", - hd->ScsiLookup, sz)); - - /* SCSI also needs SG buckets/hunk management! - * (with size equal to N * req_sz * req_depth!) - * (where N is number of SG buckets per hunk) - */ - sz = MPT_SG_BUCKETS_PER_HUNK * hd->ioc->req_sz * hd->ioc->req_depth; - mem = pci_alloc_consistent(hd->ioc->pcidev, sz, - &hd->SgHunksDMA); - if (mem == NULL) - return mpt_scsi_hosts; + sh = scsi_register(tpnt, sizeof(MPT_SCSI_HOST)); + if (sh != NULL) { + save_flags(flags); + cli(); + sh->io_port = 0; + sh->n_io_port = 0; + sh->irq = 0; + + /* Yikes! This is important! + * Otherwise, by default, linux + * only scans target IDs 0-7! + * pfactsN->MaxDevices unreliable + * (not supported in early + * versions of the FW). + * max_id = 1 + actual max id, + * max_lun = 1 + actual last lun, + * see hosts.h :o( + */ + if ((int)this->chip_type > (int)FC929) + sh->max_id = MPT_MAX_SCSI_DEVICES; + else { + /* For FC, increase the queue depth + * from MPT_SCSI_CAN_QUEUE (31) + * to MPT_FC_CAN_QUEUE (63). + */ + sh->can_queue = MPT_FC_CAN_QUEUE; + sh->max_id = MPT_MAX_FC_DEVICES<256 ? MPT_MAX_FC_DEVICES : 255; + } + sh->max_lun = MPT_LAST_LUN + 1; - memset(mem, 0, sz); - hd->SgHunks = (u8*)mem; + sh->this_id = this->pfacts[portnum].PortSCSIID; - dprintk((KERN_INFO MYNAM ": SgHunks @ %p(%08x), sz=%d\n", - hd->SgHunks, hd->SgHunksDMA, sz)); + /* OS entry to allow host drivers to force + * a queue depth on a per device basis. 
+ */ + sh->select_queue_depths = mptscsih_select_queue_depths; - hd->qtag_tick = jiffies; + /* Verify that we won't exceed the maximum + * number of chain buffers + * We can optimize: ZZ = req_sz/sizeof(MptSge_t) + * For 32bit SGE's: + * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ + * + (req_sz - 64)/sizeof(MptSge_t) + * A slightly different algorithm is required for + * 64bit SGEs. + */ + scale = this->req_sz/sizeof(MptSge_t); + if (sizeof(MptSge_t) == sizeof(SGESimple32_t)) { + numSGE = 1 + (scale - 1) * (this->facts.MaxChainDepth-1) + scale + + (this->req_sz - 64) / (sizeof(MptSge_t)); + } else if (sizeof(MptSge_t) == sizeof(SGESimple64_t)) { + numSGE = (scale - 1) * (this->facts.MaxChainDepth-1) + scale + + (this->req_sz - 60) / (sizeof(MptSge_t)); + } + + if (numSGE < sh->sg_tablesize) { + /* Reset this value */ + dprintk((MYIOC_s_INFO_FMT + "Resetting sg_tablesize to %d from %d\n", + this->name, numSGE, sh->sg_tablesize)); + sh->sg_tablesize = numSGE; + } + + restore_flags(flags); + + hd = (MPT_SCSI_HOST *) sh->hostdata; + hd->ioc = this; + + if ((int)this->chip_type > (int)FC929) + hd->is_spi = 1; + + if (DmpService && + (this->chip_type == FC919 || this->chip_type == FC929)) + hd->is_multipath = 1; + + hd->port = 0; /* FIXME! */ + + /* SCSI needs Scsi_Cmnd lookup table! + * (with size equal to req_depth*PtrSz!) + */ + sz = hd->ioc->req_depth * sizeof(void *); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0, sz); + hd->ScsiLookup = (struct scsi_cmnd **) mem; + + dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n", + this->name, hd->ScsiLookup, sz)); + + if (mptscsih_initChainBuffers(hd, 1) < 0) + goto done; + + /* Allocate memory for free and doneQ's + */ + sz = sh->can_queue * sizeof(MPT_DONE_Q); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0xFF, sz); + hd->memQ = mem; + + /* Initialize the free, done and pending Qs. 
+ */ + Q_INIT(&hd->freeQ, MPT_DONE_Q); + Q_INIT(&hd->doneQ, MPT_DONE_Q); + Q_INIT(&hd->pendingQ, MPT_DONE_Q); + spin_lock_init(&hd->freedoneQlock); + + mem = hd->memQ; + for (ii=0; ii < sh->can_queue; ii++) { + freedoneQ = (MPT_DONE_Q *) mem; + Q_ADD_TAIL(&hd->freeQ.head, freedoneQ, MPT_DONE_Q); + mem += sizeof(MPT_DONE_Q); + } + + /* Initialize this Scsi_Host + * internal task Q. + */ + Q_INIT(&hd->taskQ, MPT_FRAME_HDR); + hd->taskQcnt = 0; + + /* Allocate memory for the device structures. + * A non-Null pointer at an offset + * indicates a device exists. + * max_id = 1 + maximum id (hosts.h) + */ + sz = sh->max_id * sizeof(void *); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0, sz); + hd->Targets = (VirtDevice **) mem; + + dprintk((KERN_INFO " Targets @ %p, sz=%d\n", hd->Targets, sz)); + + + /* Clear the TM flags + */ + hd->tmPending = 0; + hd->resetPending = 0; + hd->abortSCpnt = NULL; + hd->tmPtr = NULL; + hd->numTMrequests = 0; + + /* Clear the pointer used to store + * single-threaded commands, i.e., those + * issued during a bus scan, dv and + * configuration pages. + */ + hd->cmdPtr = NULL; + + /* Attach the SCSI Host to the IOC structure + */ + this->sh = sh; + + /* Initialize this SCSI Hosts' timers + * To use, set the timer expires field + * and add_timer + */ + init_timer(&hd->timer); + hd->timer.data = (unsigned long) hd; + hd->timer.function = mptscsih_timer_expired; + + init_timer(&hd->TMtimer); + hd->TMtimer.data = (unsigned long) hd; + hd->TMtimer.function = mptscsih_taskmgmt_timeout; + hd->qtag_tick = jiffies; + + /* Moved Earlier Pam D */ + /* this->sh = sh; */ + + if (hd->is_spi) { + /* Update with the driver setup + * values. 
+ */ + if (hd->ioc->spi_data.maxBusWidth > driver_setup.max_width) + hd->ioc->spi_data.maxBusWidth = driver_setup.max_width; + if (hd->ioc->spi_data.minSyncFactor < driver_setup.min_sync_fac) + hd->ioc->spi_data.minSyncFactor = driver_setup.min_sync_fac; + + if (hd->ioc->spi_data.minSyncFactor == MPT_ASYNC) + hd->ioc->spi_data.maxSyncOffset = 0; + + hd->negoNvram = 0; +#ifdef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + hd->negoNvram = MPT_SCSICFG_USE_NVRAM; +#endif + if (driver_setup.dv == 0) + hd->negoNvram = MPT_SCSICFG_USE_NVRAM; + + hd->ioc->spi_data.forceDv = 0; + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) + hd->ioc->spi_data.dvStatus[ii] = MPT_SCSICFG_NEGOTIATE; + + + ddvprintk((MYIOC_s_INFO_FMT + "dv %x width %x factor %x \n", + hd->ioc->name, driver_setup.dv, + driver_setup.max_width, + driver_setup.min_sync_fac)); + + } + + mpt_scsi_hosts++; + } + + } /* for each adapter port */ - this->sh = sh; - mpt_scsi_hosts++; - } this = mpt_adapter_find_next(this); } +done: + if (mpt_scsi_hosts > 0) + register_reboot_notifier(&mptscsih_notifier); + return mpt_scsi_hosts; } @@ -699,63 +1375,156 @@ mptscsih_release(struct Scsi_Host *host) { MPT_SCSI_HOST *hd; -#ifndef MPT_SCSI_USE_NEW_EH + int count; unsigned long flags; - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - if (mpt_scsih_taskQ_bh_active) { - int count = 10 * HZ; + hd = (MPT_SCSI_HOST *) host->hostdata; - dprintk((KERN_INFO MYNAM ": Info: Zapping TaskMgmt thread!\n")); +#ifndef MPT_SCSI_USE_NEW_EH +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_release = 1; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); +#endif - /* Zap the taskQ! 
*/ - Q_INIT(&mpt_scsih_taskQ, MPT_FRAME_HDR); - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_lock_irqsave(&mytaskQ_lock, flags); + if (mytaskQ_bh_active) { + count = 10 * HZ; - while(mpt_scsih_taskQ_bh_active && --count) { - current->state = TASK_INTERRUPTIBLE; + spin_unlock_irqrestore(&mytaskQ_lock, flags); + dprintk((KERN_INFO MYNAM ": Info: Zapping TaskMgmt thread!\n")); + clean_taskQ(hd); + + while(mytaskQ_bh_active && --count) { + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); } if (!count) - printk(KERN_ERR MYNAM ": ERROR! TaskMgmt thread still active!\n"); + printk(KERN_ERR MYNAM ": ERROR - TaskMgmt thread still active!\n"); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&mytaskQ_lock, flags); #endif - hd = (MPT_SCSI_HOST *) host->hostdata; +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + /* Check DV thread active */ + count = 10 * HZ; + spin_lock_irqsave(&dvtaskQ_lock, flags); + while(dvtaskQ_active && --count) { + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + spin_lock_irqsave(&dvtaskQ_lock, flags); + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + if (!count) + printk(KERN_ERR MYNAM ": ERROR - DV thread still active!\n"); +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) + else + printk(KERN_ERR MYNAM ": DV thread orig %d, count %d\n", 10 * HZ, count); +#endif +#endif + + unregister_reboot_notifier(&mptscsih_notifier); + if (hd != NULL) { - int sz1, sz2; + int sz1, sz2, sz3, sztarget=0; + int szchain = 0; + int szQ = 0; + int scale; + + /* Synchronize disk caches + */ + (void) mptscsih_synchronize_cache(hd, 0); + + sz1 = sz2 = sz3 = 0; + + if (hd->ioc->req_sz <= 64) + scale = MPT_SG_REQ_64_SCALE; + else if (hd->ioc->req_sz <= 96) + scale = MPT_SG_REQ_96_SCALE; + else + scale = MPT_SG_REQ_128_SCALE; - sz1 = sz2 = 0; if (hd->ScsiLookup != NULL) { sz1 = hd->ioc->req_depth * sizeof(void *); kfree(hd->ScsiLookup); 
hd->ScsiLookup = NULL; } - if (hd->SgHunks != NULL) { + if (hd->ReqToChain != NULL) { + szchain += scale * hd->ioc->req_depth * sizeof(int); + kfree(hd->ReqToChain); + hd->ReqToChain = NULL; + } + + if (hd->ChainToChain != NULL) { + szchain += scale * hd->ioc->req_depth * sizeof(int); + kfree(hd->ChainToChain); + hd->ChainToChain = NULL; + } + + if (hd->ChainBuffer != NULL) { + sz2 = scale * hd->ioc->req_depth * hd->ioc->req_sz; + szchain += sz2; - sz2 = MPT_SG_BUCKETS_PER_HUNK * hd->ioc->req_sz * hd->ioc->req_depth; pci_free_consistent(hd->ioc->pcidev, sz2, - hd->SgHunks, hd->SgHunksDMA); - hd->SgHunks = NULL; + hd->ChainBuffer, hd->ChainBufferDMA); + hd->ChainBuffer = NULL; + } + + if (hd->memQ != NULL) { + szQ = host->can_queue * sizeof(MPT_DONE_Q); + kfree(hd->memQ); + hd->memQ = NULL; } - dprintk((KERN_INFO MYNAM ": Free'd ScsiLookup (%d) and SgHunks (%d) memory\n", sz1, sz2)); + + if (hd->Targets != NULL) { + int max, ii; + + /* + * Free any target structures that were allocated. + */ + if (hd->is_spi) { + max = MPT_MAX_SCSI_DEVICES; + } else { + max = MPT_MAX_FC_DEVICES; + } + for (ii=0; ii < max; ii++) { + if (hd->Targets[ii]) { + kfree(hd->Targets[ii]); + hd->Targets[ii] = NULL; + sztarget += sizeof(VirtDevice); + } + } + + /* + * Free pointer array. 
+ */ + sz3 = max * sizeof(void *); + kfree(hd->Targets); + hd->Targets = NULL; + } + + dprintk((MYIOC_s_INFO_FMT "Free'd ScsiLookup (%d), chain (%d) and Target (%d+%d) memory\n", + hd->ioc->name, sz1, szchain, sz3, sztarget)); + dprintk(("Free'd done and free Q (%d) memory\n", szQ)); } + /* NULL the Scsi_Host pointer + */ + hd->ioc->sh = NULL; + scsi_unregister(host); if (mpt_scsi_hosts) { if (--mpt_scsi_hosts == 0) { -#if 0 - mptscsih_flush_pending(); -#endif mpt_reset_deregister(ScsiDoneCtx); dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n")); mpt_event_deregister(ScsiDoneCtx); dprintk((KERN_INFO MYNAM ": Deregistered for IOC event notifications\n")); - mpt_deregister(ScsiDoneCtx); + mpt_deregister(ScsiScanDvCtx); mpt_deregister(ScsiTaskCtx); + mpt_deregister(ScsiDoneCtx); if (info_kbuf != NULL) kfree(info_kbuf); @@ -767,6 +1536,45 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** + * mptscsih_halt - Process the reboot notification + * @nb: Pointer to a struct notifier_block (ignored) + * @event: event (SYS_HALT, SYS_RESTART, SYS_POWER_OFF) + * @buf: Pointer to a data buffer (ignored) + * + * This routine called if a system shutdown or reboot is to occur. + * + * Return NOTIFY_DONE if this is something other than a reboot message. + * NOTIFY_OK if this is a reboot message. 
+ */ +static int +mptscsih_halt(struct notifier_block *nb, ulong event, void *buf) +{ + MPT_ADAPTER *ioc = NULL; + MPT_SCSI_HOST *hd = NULL; + + /* Ignore all messages other than reboot message + */ + if ((event != SYS_RESTART) && (event != SYS_HALT) + && (event != SYS_POWER_OFF)) + return (NOTIFY_DONE); + + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + /* Flush the cache of this adapter + */ + if (ioc->sh) { + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd) { + mptscsih_synchronize_cache(hd, 0); + } + } + } + + unregister_reboot_notifier(&mptscsih_notifier); + return NOTIFY_OK; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** * mptscsih_info - Return information about MPT adapter * @SChost: Pointer to Scsi_Host structure * @@ -794,14 +1602,6 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int max_qd = 1; -#ifdef MPT_DEBUG - static int max_sges = 0; - static int max_xfer = 0; -#endif -#if 0 - static int max_num_sges = 0; - static int max_sgent_len = 0; -#endif #if 0 static int index_log[128]; static int index_ent = 0; @@ -814,6 +1614,47 @@ #else #define ADD_INDEX_LOG(req_ent) do { } while(0) #endif + +#ifdef DROP_TEST +#define DROP_IOC 1 /* IOC to force failures */ +#define DROP_TARGET 3 /* Target ID to force failures */ +#define DROP_THIS_CMD 10000 /* iteration to drop command */ +static int dropCounter = 0; +static int dropTestOK = 0; /* num did good */ +static int dropTestBad = 0; /* num did bad */ +static int dropTestNum = 0; /* total = good + bad + incomplete */ +static int numTotCmds = 0; +static MPT_FRAME_HDR *dropMfPtr = NULL; +static int numTMrequested = 0; +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_put_msgframe - Wrapper routine to post message frame to F/W. 
+ * @context: Call back context (ScsiDoneCtx, ScsiScanDvCtx) + * @id: IOC id number + * @mf: Pointer to message frame + * + * Handles the call to mptbase for posting request and queue depth + * tracking. + * + * Returns none. + */ +static void +mptscsih_put_msgframe(int context, int id, MPT_FRAME_HDR *mf) +{ + /* Main banana... */ + atomic_inc(&queue_depth); + if (atomic_read(&queue_depth) > max_qd) { + max_qd = atomic_read(&queue_depth); + dprintk((KERN_INFO MYNAM ": Queue depth now %d.\n", max_qd)); + } + + mpt_put_msg_frame(context, id, mf); + + return; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine. @@ -829,154 +1670,96 @@ int mptscsih_qcmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { - struct Scsi_Host *host; MPT_SCSI_HOST *hd; MPT_FRAME_HDR *mf; SCSIIORequest_t *pScsiReq; + VirtDevice *pTarget; + MPT_DONE_Q *buffer = NULL; + unsigned long flags; + int target; + int lun; int datadir; - u32 len; - u32 sgdir; + u32 datalen; u32 scsictl; u32 scsidir; u32 qtag; - u32 *mptr; - int sge_spill1; - int frm_sz; - int sges_left; - u32 chain_offset; + u32 cmd_len; int my_idx; - int i; - - dmfprintk((KERN_INFO MYNAM "_qcmd: SCpnt=%p, done()=%p\n", - SCpnt, done)); + int ii; + int rc; + int did_errcode; + int issueCmd; - host = SCpnt->host; - hd = (MPT_SCSI_HOST *) host->hostdata; - -#if 0 - if (host->host_busy >= 60) { - MPT_ADAPTER *ioc = hd->ioc; - u16 pci_command, pci_status; - - /* The IOC is probably hung, investigate status. */ - printk("MPI: IOC probably hung IOCSTAT[%08x] INTSTAT[%08x] REPLYFIFO[%08x]\n", - readl(&ioc->chip.fc9xx->DoorbellValue), - readl(&ioc->chip.fc9xx->IntStatus), - readl(&ioc->chip.fc9xx->ReplyFifo)); - pci_read_config_word(ioc->pcidev, PCI_COMMAND, &pci_command); - pci_read_config_word(ioc->pcidev, PCI_STATUS, &pci_status); - printk("MPI: PCI command[%04x] status[%04x]\n", pci_command, pci_status); - { - /* DUMP req index logger. 
*/ - int begin, end; + did_errcode = 0; + hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + target = SCpnt->target; + lun = SCpnt->lun; + SCpnt->scsi_done = done; - begin = (index_ent - 65) & (128 - 1); - end = index_ent & (128 - 1); - printk("MPI: REQ_INDEX_HIST["); - while (begin != end) { - printk("(%04x)", index_log[begin]); - begin = (begin + 1) & (128 - 1); - } - printk("\n"); - } - sti(); - while(1) - barrier(); - } -#endif + pTarget = hd->Targets[target]; - SCpnt->scsi_done = done; + dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n", + (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done)); /* 20000617 -sralston * GRRRRR... Shouldn't have to do this but... * Do explicit check for REQUEST_SENSE and cached SenseData. * If yes, return cached SenseData. */ -#ifdef MPT_SCSI_CACHE_AUTOSENSE - { - MPT_SCSI_DEV *mpt_sdev; - - mpt_sdev = (MPT_SCSI_DEV *) SCpnt->device->hostdata; - if (mpt_sdev && SCpnt->cmnd[0] == REQUEST_SENSE) { - u8 *dest = NULL; - - if (!SCpnt->use_sg) + if (SCpnt->cmnd[0] == REQUEST_SENSE) { + u8 *dest = NULL; + int sz; + + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_VALID_SENSE)) { + pTarget->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; //sjr-moved-here + if (!SCpnt->use_sg) { dest = SCpnt->request_buffer; - else { + } else { struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; if (sg) - dest = (u8 *) (unsigned long)sg_dma_address(sg); + dest = (u8 *)(ulong)sg_dma_address(sg); } - if (dest && mpt_sdev->sense_sz) { - memcpy(dest, mpt_sdev->CachedSense.data, mpt_sdev->sense_sz); -#ifdef MPT_DEBUG - { - int i; - u8 *sb; - - sb = mpt_sdev->CachedSense.data; - if (sb && ((sb[0] & 0x70) == 0x70)) { - printk(KERN_WARNING MYNAM ": Returning last cached SCSI (hex) SenseData:\n"); - printk(KERN_WARNING " "); - for (i = 0; i < (8 + sb[7]); i++) - printk("%s%02x", i == 13 ? 
"-" : " ", sb[i]); - printk("\n"); - } - } + if (dest) { + sz = MIN (SCSI_STD_SENSE_BYTES, SCpnt->request_bufflen); + memcpy(dest, pTarget->sense, sz); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) + SCpnt->resid = SCpnt->request_bufflen - sz; #endif + SCpnt->result = 0; + SCpnt->scsi_done(SCpnt); + + //sjr-moved-up//pTarget->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; + + return 0; } - SCpnt->resid = SCpnt->request_bufflen - mpt_sdev->sense_sz; - SCpnt->result = 0; -/* spin_lock(SCpnt->host->host_lock); */ - SCpnt->scsi_done(SCpnt); -/* spin_unlock(SCpnt->host->host_lock); */ - return 0; } } -#endif - - if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); -/* return 1; */ - return 0; - } - pScsiReq = (SCSIIORequest_t *) mf; - my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - - ADD_INDEX_LOG(my_idx); - - /* Map the data portion, if any. */ - sges_left = SCpnt->use_sg; - if (sges_left) { - sges_left = pci_map_sg(hd->ioc->pcidev, - (struct scatterlist *) SCpnt->request_buffer, - sges_left, - scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); - } else if (SCpnt->request_bufflen) { - dma_addr_t buf_dma_addr; - - buf_dma_addr = pci_map_single(hd->ioc->pcidev, - SCpnt->request_buffer, - SCpnt->request_bufflen, - scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); - - /* We hide it here for later unmap. */ - SCpnt->SCp.ptr = (char *)(unsigned long) buf_dma_addr; + if (hd->resetPending) { + /* Prevent new commands from being issued + * while reloading the FW. + */ + did_errcode = 1; + goto did_error; } /* * Put together a MPT SCSI request... 
*/ + if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n", + hd->ioc->name)); + did_errcode = 2; + goto did_error; + } - /* Assume SimpleQ, NO DATA XFER for now */ + pScsiReq = (SCSIIORequest_t *) mf; - len = SCpnt->request_bufflen; - sgdir = 0x00000000; /* SGL IN (host<--ioc) */ - scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; + my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + + ADD_INDEX_LOG(my_idx); /* * The scsi layer should be handling this stuff @@ -985,25 +1768,27 @@ /* BUG FIX! 19991030 -sralston * TUR's being issued with scsictl=0x02000000 (DATA_IN)! - * Seems we may receive a buffer (len>0) even when there + * Seems we may receive a buffer (datalen>0) even when there * will be no data transfer! GRRRRR... */ datadir = mptscsih_io_direction(SCpnt); if (datadir < 0) { + datalen = SCpnt->request_bufflen; scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */ } else if (datadir > 0) { - sgdir = 0x04000000; /* SGL OUT (host-->ioc) */ + datalen = SCpnt->request_bufflen; scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */ } else { - len = 0; + datalen = 0; + scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; } - qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; - - /* - * Attach tags to the devices + /* Default to untagged. Once a target structure has been allocated, + * use the Inquiry data to determine if device supports tagged. */ - if (SCpnt->device->tagged_supported) { + qtag = MPI_SCSIIO_CONTROL_UNTAGGED; + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES) + && (SCpnt->device->tagged_supported)) { /* * Some drives are too stupid to handle fairness issues * with tagged queueing. We throw in the odd ordered @@ -1012,87 +1797,24 @@ if ((jiffies - hd->qtag_tick) > (5*HZ)) { qtag = MPI_SCSIIO_CONTROL_ORDEREDQ; hd->qtag_tick = jiffies; - -#if 0 - /* These are ALWAYS zero! 
- * (Because this is a place for the device driver to dynamically - * assign tag numbers any way it sees fit. That's why -DaveM) - */ - dprintk((KERN_DEBUG MYNAM ": sc->device->current_tag = %08x\n", - SCpnt->device->current_tag)); - dprintk((KERN_DEBUG MYNAM ": sc->tag = %08x\n", - SCpnt->tag)); -#endif } -#if 0 - else { - /* Hmmm... I always see value of 0 here, - * of which {HEAD_OF, ORDERED, SIMPLE} are NOT! -sralston - * (Because this is a place for the device driver to dynamically - * assign tag numbers any way it sees fit. That's why -DaveM) - * - * if (SCpnt->tag == HEAD_OF_QUEUE_TAG) - */ - if (SCpnt->device->current_tag == HEAD_OF_QUEUE_TAG) - qtag = MPI_SCSIIO_CONTROL_HEADOFQ; - else if (SCpnt->tag == ORDERED_QUEUE_TAG) - qtag = MPI_SCSIIO_CONTROL_ORDEREDQ; - } -#endif + else + qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; } - scsictl = scsidir | qtag; - frm_sz = hd->ioc->req_sz; - - /* Ack! - * sge_spill1 = 9; + /* Use the above information to set up the message frame */ - sge_spill1 = (frm_sz - (sizeof(SCSIIORequest_t) - sizeof(SGEIOUnion_t) + sizeof(SGEChain32_t))) / 8; - /* spill1: for req_sz == 128 (128-48==80, 80/8==10 SGEs max, first time!), --> use 9 - * spill1: for req_sz == 96 ( 96-48==48, 48/8== 6 SGEs max, first time!), --> use 5 - */ - dsgprintk((KERN_INFO MYNAM ": SG: %x spill1 = %d\n", - my_idx, sge_spill1)); - -#ifdef MPT_DEBUG - if (sges_left > max_sges) { - max_sges = sges_left; - dprintk((KERN_INFO MYNAM ": MPT_MaxSges = %d\n", max_sges)); - } -#endif -#if 0 - if (sges_left > max_num_sges) { - max_num_sges = sges_left; - printk(KERN_INFO MYNAM ": MPT_MaxNumSges = %d\n", max_num_sges); - } -#endif - - dsgprintk((KERN_INFO MYNAM ": SG: %x sges_left = %d (initially)\n", - my_idx, sges_left)); - - chain_offset = 0; - if (sges_left > (sge_spill1+1)) { -#if 0 - chain_offset = 0x1E; -#endif - chain_offset = (frm_sz - 8) / 4; - } - - pScsiReq->TargetID = SCpnt->target; + pScsiReq->TargetID = target; pScsiReq->Bus = hd->port; - pScsiReq->ChainOffset = 
chain_offset; + pScsiReq->ChainOffset = 0; pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; pScsiReq->CDBLength = SCpnt->cmd_len; - -/* We have 256 bytes alloc'd per IO; let's use it. */ -/* pScsiReq->SenseBufferLength = SNS_LEN(SCpnt); */ - pScsiReq->SenseBufferLength = 255; - + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; pScsiReq->Reserved = 0; - pScsiReq->MsgFlags = 0; + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; pScsiReq->LUN[0] = 0; - pScsiReq->LUN[1] = SCpnt->lun; + pScsiReq->LUN[1] = lun; pScsiReq->LUN[2] = 0; pScsiReq->LUN[3] = 0; pScsiReq->LUN[4] = 0; @@ -1104,223 +1826,780 @@ /* * Write SCSI CDB into the message */ - for (i = 0; i < 12; i++) - pScsiReq->CDB[i] = SCpnt->cmnd[i]; - for (i = 12; i < 16; i++) - pScsiReq->CDB[i] = 0; + cmd_len = SCpnt->cmd_len; + for (ii=0; ii < cmd_len; ii++) + pScsiReq->CDB[ii] = SCpnt->cmnd[ii]; + for (ii=cmd_len; ii < 16; ii++) + pScsiReq->CDB[ii] = 0; /* DataLength */ - pScsiReq->DataLength = cpu_to_le32(len); + pScsiReq->DataLength = cpu_to_le32(datalen); /* SenseBuffer low address */ - pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_pool_dma + (my_idx * 256)); + pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma + + (my_idx * MPT_SENSE_BUFFER_ALLOC)); + + /* Now add the SG list + * Always have a SGE even if null length. + */ + rc = SUCCESS; + if (datalen == 0) { + /* Add a NULL SGE */ + mptscsih_AddNullSGE(pScsiReq); + } else { + /* Add a 32 or 64 bit SGE */ + rc = mptscsih_Add32BitSGE(hd, SCpnt, pScsiReq, my_idx); + } - mptr = (u32 *) &pScsiReq->SGL; - /* - * Now fill in the SGList... - * NOTES: For 128 byte req_sz, we can hold up to 10 simple SGE's - * in the remaining request frame. We -could- do unlimited chains - * but each chain buffer can only be req_sz bytes in size, and - * we lose one SGE whenever we chain. - * For 128 req_sz, we can hold up to 16 SGE's per chain buffer. 
- * For practical reasons, limit ourselves to 1 overflow chain buffer; - * giving us 9 + 16 == 25 SGE's max. - * At 4 Kb per SGE, that yields 100 Kb max transfer. - * - * (This code needs to be completely changed when/if 64-bit DMA - * addressing is used, since we will be able to fit much less than - * 10 embedded SG entries. -DaveM) - */ - if (sges_left) { - struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; - u32 v1, v2; - int sge_spill2; - int sge_cur_spill; - int sgCnt; - u8 *pSgBucket; - int chain_sz; - - len = 0; - - /* sge_spill2 = 15; - * spill2: for req_sz == 128 (128/8==16 SGEs max, first time!), --> use 15 - * spill2: for req_sz == 96 ( 96/8==12 SGEs max, first time!), --> use 11 - */ - sge_spill2 = frm_sz / 8 - 1; - dsgprintk((KERN_INFO MYNAM ": SG: %x spill2 = %d\n", - my_idx, sge_spill2)); - - pSgBucket = NULL; - sgCnt = 0; - sge_cur_spill = sge_spill1; - while (sges_left) { -#if 0 - if (sg_dma_len(sg) > max_sgent_len) { - max_sgent_len = sg_dma_len(sg); - printk(KERN_INFO MYNAM ": MPT_MaxSgentLen = %d\n", max_sgent_len); - } -#endif - /* Write one simple SGE */ - v1 = sgdir | 0x10000000 | sg_dma_len(sg); - len += sg_dma_len(sg); - v2 = sg_dma_address(sg); - dsgprintk((KERN_INFO MYNAM ": SG: %x Writing SGE @%p: %08x %08x, sges_left=%d\n", - my_idx, mptr, v1, v2, sges_left)); - *mptr++ = cpu_to_le32(v1); - *mptr++ = cpu_to_le32(v2); - sg++; - sgCnt++; - - if (--sges_left == 0) { - /* re-write 1st word of previous SGE with SIMPLE, - * LE, EOB, and EOL bits! - */ - v1 = 0xD1000000 | sgdir | sg_dma_len(sg-1); - dsgprintk((KERN_INFO MYNAM ": SG: %x (re)Writing SGE @%p: %08x (VERY LAST SGE!)\n", - my_idx, mptr-2, v1)); - *(mptr - 2) = cpu_to_le32(v1); - } else { - if ((sges_left > 1) && ((sgCnt % sge_cur_spill) == 0)) { - dsgprintk((KERN_INFO MYNAM ": SG: %x SG spill at modulo 0!\n", - my_idx)); - - /* Fixup previous SGE with LE bit! 
*/ - v1 = sgdir | 0x90000000 | sg_dma_len(sg-1); - dsgprintk((KERN_INFO MYNAM ": SG: %x (re)Writing SGE @%p: %08x (LAST BUCKET SGE!)\n", - my_idx, mptr-2, v1)); - *(mptr - 2) = cpu_to_le32(v1); - - chain_offset = 0; - /* Going to need another chain? */ - if (sges_left > (sge_spill2+1)) { -#if 0 - chain_offset = 0x1E; + if (rc == SUCCESS) { + hd->ScsiLookup[my_idx] = SCpnt; + SCpnt->host_scribble = NULL; + +#ifdef DROP_TEST + numTotCmds++; + /* If the IOC number and target match, increment + * counter. If counter matches DROP_THIS, do not + * issue command to FW to force a reset. + * Save the MF pointer so we can free resources + * when task mgmt completes. + */ + if ((hd->ioc->id == DROP_IOC) && (target == DROP_TARGET)) { + dropCounter++; + + if (dropCounter == DROP_THIS_CMD) { + dropCounter = 0; + + /* If global is set, then we are already + * doing something - so keep issuing commands. + */ + if (dropMfPtr == NULL) { + dropTestNum++; + dropMfPtr = mf; + atomic_inc(&queue_depth); + printk(MYIOC_s_INFO_FMT + "Dropped SCSI cmd (%p)\n", + hd->ioc->name, SCpnt); + printk("mf (%p) req (%4x) tot cmds (%d)\n", + mf, my_idx, numTotCmds); + + return 0; + } + } + } #endif - chain_offset = (frm_sz - 8) / 4; - chain_sz = frm_sz; - } else { - chain_sz = sges_left * 8; - } - /* write chain SGE at mptr. 
*/ - v1 = 0x30000000 | chain_offset<<16 | chain_sz; - if (pSgBucket == NULL) { - pSgBucket = hd->SgHunks - + (my_idx * frm_sz * MPT_SG_BUCKETS_PER_HUNK); - } else { - pSgBucket += frm_sz; + /* SCSI specific processing */ + issueCmd = 1; + if (hd->is_spi) { + int dvStatus = hd->ioc->spi_data.dvStatus[target]; + + if (dvStatus || hd->ioc->spi_data.forceDv) { + + /* Write SDP1 on 1st I/O to this target */ + if (dvStatus & MPT_SCSICFG_NEGOTIATE) { + mptscsih_writeSDP1(hd, 0, target, hd->negoNvram); + dvStatus &= ~MPT_SCSICFG_NEGOTIATE; + hd->ioc->spi_data.dvStatus[target] = dvStatus; + } + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + if ((dvStatus & MPT_SCSICFG_NEED_DV) || hd->ioc->spi_data.forceDv) { + unsigned long lflags; + /* Schedule DV if necessary */ + spin_lock_irqsave(&dvtaskQ_lock, lflags); + if (!dvtaskQ_active) { + dvtaskQ_active = 1; + mptscsih_dvTask.sync = 0; + mptscsih_dvTask.routine = mptscsih_domainValidation; + mptscsih_dvTask.data = (void *) hd; + + SCHEDULE_TASK(&mptscsih_dvTask); } - v2 = (hd->SgHunksDMA + - ((u8 *)pSgBucket - (u8 *)hd->SgHunks)); - dsgprintk((KERN_INFO MYNAM ": SG: %x Writing SGE @%p: %08x %08x (CHAIN!)\n", - my_idx, mptr, v1, v2)); - *(mptr++) = cpu_to_le32(v1); - *(mptr) = cpu_to_le32(v2); + hd->ioc->spi_data.forceDv = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, lflags); + } - mptr = (u32 *) pSgBucket; - sgCnt = 0; - sge_cur_spill = sge_spill2; + /* Trying to do DV to this target, extend timeout. 
+ * Wait to issue intil flag is clear + */ + if (dvStatus & MPT_SCSICFG_DV_PENDING) { + mod_timer(&SCpnt->eh_timeout, jiffies + 40 * HZ); + issueCmd = 0; } +#endif + } + } + + if (issueCmd) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p)\n", + hd->ioc->name, SCpnt)); + } else { + ddvtprintk((MYIOC_s_INFO_FMT "Pending SCSI cmd (%p)\n", + hd->ioc->name, SCpnt)); + /* Place this command on the pendingQ if possible */ + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->freeQ)) { + buffer = hd->freeQ.head; + Q_DEL_ITEM(buffer); + + /* Save the mf pointer + */ + buffer->argp = (void *)mf; + + /* Add to the pendingQ + */ + Q_ADD_TAIL(&hd->pendingQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } else { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + SCpnt->result = (DID_BUS_BUSY << 16); + SCpnt->scsi_done(SCpnt); } } } else { - dsgprintk((KERN_INFO MYNAM ": SG: non-SG for %p, len=%d\n", - SCpnt, SCpnt->request_bufflen)); + mptscsih_freeChainBuffers(hd, my_idx); + mpt_free_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); + did_errcode = 3; + goto did_error; + } + + return 0; + +did_error: + dprintk((MYIOC_s_WARN_FMT "_qcmd did_errcode=%d (sc=%p)\n", + hd->ioc->name, did_errcode, SCpnt)); + /* Just wish OS to issue a retry */ + SCpnt->result = (DID_BUS_BUSY << 16); + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->freeQ)) { + buffer = hd->freeQ.head; + Q_DEL_ITEM(buffer); + + /* Set the Scsi_Cmnd pointer + */ + buffer->argp = (void *)SCpnt; + + /* Add to the doneQ + */ + Q_ADD_TAIL(&hd->doneQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } else { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + SCpnt->scsi_done(SCpnt); + } - if (len > 0) { - dma_addr_t buf_dma_addr; + return 0; +} - buf_dma_addr = (dma_addr_t) (unsigned long)SCpnt->SCp.ptr; - *(mptr++) = 
cpu_to_le32(0xD1000000|sgdir|SCpnt->request_bufflen); - *(mptr++) = cpu_to_le32(buf_dma_addr); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_Add32BitSGE - Add a 32Bit SGE (plus chain buffers) to the + * SCSIIORequest_t Message Frame. + * @hd: Pointer to MPT_SCSI_HOST structure + * @SCpnt: Pointer to Scsi_Cmnd structure + * @pReq: Pointer to SCSIIORequest_t structure + * + * Returns ... + */ +static int +mptscsih_Add32BitSGE(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt, + SCSIIORequest_t *pReq, int req_idx) +{ + MptSge_t *psge; + MptChain_t *chainSge; + struct scatterlist *sg; + int frm_sz; + int sges_left, sg_done; + int chain_idx = MPT_HOST_NO_CHAIN; + int sgeOffset; + int numSgeSlots, numSgeThisFrame; + u32 sgflags, sgdir, len, thisxfer = 0; + int offset; + int newIndex; + int ii; + dma_addr_t v2; + + sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; + if (sgdir == MPI_SCSIIO_CONTROL_WRITE) { + sgdir = MPT_TRANSFER_HOST_TO_IOC; + } else { + sgdir = MPT_TRANSFER_IOC_TO_HOST; + } + + psge = (MptSge_t *) &pReq->SGL; + frm_sz = hd->ioc->req_sz; + + /* Map the data portion, if any. + * sges_left = 0 if no data transfer. + */ + sges_left = SCpnt->use_sg; + if (SCpnt->use_sg) { + sges_left = pci_map_sg(hd->ioc->pcidev, + (struct scatterlist *) SCpnt->request_buffer, + SCpnt->use_sg, + scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); + } else if (SCpnt->request_bufflen) { + dma_addr_t buf_dma_addr; + scPrivate *my_priv; + + buf_dma_addr = pci_map_single(hd->ioc->pcidev, + SCpnt->request_buffer, + SCpnt->request_bufflen, + scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); + + /* We hide it here for later unmap. 
*/ + my_priv = (scPrivate *) &SCpnt->SCp; + my_priv->p1 = (void *)(ulong) buf_dma_addr; + + dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n", + hd->ioc->name, SCpnt, SCpnt->request_bufflen)); + + /* 0xD1000000 = LAST | EOB | SIMPLE | EOL */ + psge->FlagsLength = cpu_to_le32( + 0xD1000000|sgdir|SCpnt->request_bufflen); + cpu_to_leXX(buf_dma_addr, psge->Address); + + return SUCCESS; + } + + /* Handle the SG case. + */ + sg = (struct scatterlist *) SCpnt->request_buffer; + sg_done = 0; + sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION); + chainSge = NULL; + + /* Prior to entering this loop - the following must be set + * current MF: sgeOffset (bytes) + * chainSge (Null if original MF is not a chain buffer) + * sg_done (num SGE done for this MF) + */ + +nextSGEset: + numSgeSlots = ((frm_sz - sgeOffset) / sizeof(MptSge_t)); + numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; + + sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; + + /* Get first (num - 1) SG elements + * Skip any SG entries with a length of 0 + * NOTE: at finish, sg and psge pointed to NEXT data/location positions + */ + for (ii=0; ii < (numSgeThisFrame-1); ii++) { + thisxfer = sg_dma_len(sg); + if (thisxfer == 0) { + sg ++; /* Get next SG element from the OS */ + sg_done++; + continue; } + + len += thisxfer; + psge->FlagsLength = cpu_to_le32( sgflags | thisxfer ); + v2 = sg_dma_address(sg); + cpu_to_leXX(v2, psge->Address); + + sg++; /* Get next SG element from the OS */ + psge++; /* Point to next SG location in this MF */ + sgeOffset += sizeof(MptSge_t); + sg_done++; } -#ifdef MPT_DEBUG - /* if (SCpnt->request_bufflen > max_xfer) */ - if (len > max_xfer) { - max_xfer = len; - dprintk((KERN_INFO MYNAM ": MPT_MaxXfer = %d\n", max_xfer)); + if (numSgeThisFrame == sges_left) { + /* Add last element, end of buffer and end of list flags. 
+ */ + sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT | + MPT_SGE_FLAGS_END_OF_BUFFER | + MPT_SGE_FLAGS_ADDRESSING | + MPT_SGE_FLAGS_END_OF_LIST; + + /* Add last SGE and set termination flags. + * Note: Last SGE may have a length of 0 - which should be ok. + */ + thisxfer = sg_dma_len(sg); + len += thisxfer; + + psge->FlagsLength = cpu_to_le32( sgflags | thisxfer ); + v2 = sg_dma_address(sg); + cpu_to_leXX(v2, psge->Address); + + sg_done++; + + if (chainSge) { + /* The current buffer is a chain buffer, + * but there is not another one. + * Update the chain element + * Offset and Length fields. + */ + chainSge->NextChainOffset = 0; + sgeOffset += sizeof(MptSge_t); + chainSge->Length = cpu_to_le16(sgeOffset); + } else { + /* The current buffer is the original MF + * and there is no Chain buffer. + */ + pReq->ChainOffset = 0; + } + } else { + /* At least one chain buffer is needed. + * Complete the first MF + * - last SGE element, set the LastElement bit + * - set ChainOffset (words) for orig MF + * (OR finish previous MF chain buffer) + * - update MFStructPtr ChainIndex + * - Populate chain element + * Also + * Loop until done. + */ + + dsgprintk((MYIOC_s_INFO_FMT "SG: Chain Required! sg done %d\n", + hd->ioc->name, sg_done)); + + /* Set LAST_ELEMENT flag for last non-chain element + * in the buffer. Since psge points at the NEXT + * SGE element, go back one SGE element, update the flags + * and reset the pointer. (Note: sgflags & thisxfer are already + * set properly). + */ + if (sg_done) { + psge--; + sgflags = le32_to_cpu (psge->FlagsLength); + sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; + psge->FlagsLength = cpu_to_le32( sgflags ); + psge++; + } + + if (chainSge) { + /* The current buffer is a chain buffer. + * chainSge points to the previous Chain Element. + * Update its chain element Offset and Length (must + * include chain element size) fields. + * Old chain element is now complete. 
+ */ + chainSge->NextChainOffset = (u8) (sgeOffset >> 2); + sgeOffset += sizeof(MptSge_t); + chainSge->Length = cpu_to_le16(sgeOffset); + } else { + /* The original MF buffer requires a chain buffer - + * set the offset. + * Last element in this MF is a chain element. + */ + pReq->ChainOffset = (u8) (sgeOffset >> 2); + } + + sges_left -= sg_done; + + + /* NOTE: psge points to the beginning of the chain element + * in current buffer. Get a chain buffer. + */ + if ((mptscsih_getFreeChainBuffer(hd, &newIndex)) == FAILED) + return FAILED; + + /* Update the tracking arrays. + * If chainSge == NULL, update ReqToChain, else ChainToChain + */ + if (chainSge) { + hd->ChainToChain[chain_idx] = newIndex; + } else { + hd->ReqToChain[req_idx] = newIndex; + } + chain_idx = newIndex; + offset = hd->ioc->req_sz * chain_idx; + + /* Populate the chainSGE for the current buffer. + * - Set chain buffer pointer to psge and fill + * out the Address and Flags fields. + */ + chainSge = (MptChain_t *) psge; + chainSge->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; + cpu_to_leXX ((hd->ChainBufferDMA + offset), chainSge->Address); + + dsgprintk((KERN_INFO " Current buff @ %p (index 0x%x)", + psge, req_idx)); + + /* Start the SGE for the next buffer + */ + psge = (MptSge_t *) (hd->ChainBuffer + offset); + sgeOffset = 0; + sg_done = 0; + + dsgprintk((KERN_INFO " Chain buff @ %p (index 0x%x)\n", + psge, chain_idx)); + + /* Start the SGE for the next buffer + */ + + goto nextSGEset; } -#endif - hd->ScsiLookup[my_idx] = SCpnt; + return SUCCESS; +} - /* Main banana... */ - mpt_put_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_AddNullSGE - Add a NULL SGE to the SCSIIORequest_t + * Message Frame. 
+ * @pReq: Pointer to SCSIIORequest_t structure + */ +static void +mptscsih_AddNullSGE(SCSIIORequest_t *pReq) +{ + MptSge_t *psge; - atomic_inc(&queue_depth); - if (atomic_read(&queue_depth) > max_qd) { - max_qd = atomic_read(&queue_depth); - dprintk((KERN_INFO MYNAM ": Queue depth now %d.\n", max_qd)); + psge = (MptSge_t *) &pReq->SGL; + psge->FlagsLength = cpu_to_le32(MPT_SGE_FLAGS_SSIMPLE_READ | 0); + + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_getFreeChainBuffes - Function to get a free chain + * from the MPT_SCSI_HOST FreeChainQ. + * @hd: Pointer to the MPT_SCSI_HOST instance + * @req_idx: Index of the SCSI IO request frame. (output) + * + * return SUCCESS or FAILED + */ +static int +mptscsih_getFreeChainBuffer(MPT_SCSI_HOST *hd, int *retIndex) +{ + MPT_FRAME_HDR *chainBuf = NULL; + unsigned long flags; + int rc = FAILED; + int chain_idx = MPT_HOST_NO_CHAIN; + + //spin_lock_irqsave(&hd->FreeChainQlock, flags); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!Q_IS_EMPTY(&hd->FreeChainQ)) { + + int offset; + + chainBuf = hd->FreeChainQ.head; + Q_DEL_ITEM(&chainBuf->u.frame.linkage); + offset = (u8 *)chainBuf - (u8 *)hd->ChainBuffer; + chain_idx = offset / hd->ioc->req_sz; + rc = SUCCESS; } + //spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - dmfprintk((KERN_INFO MYNAM ": Issued SCSI cmd (%p)\n", SCpnt)); - return 0; + *retIndex = chain_idx; + + dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n", + hd->ioc->name, *retIndex, chainBuf)); + + return rc; } -#ifdef MPT_SCSI_USE_NEW_EH /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - mptscsih_abort - Returns: 0=SUCCESS, else FAILED -*/ + * mptscsih_freeChainBuffers - Function to free chain buffers associated + * with a SCSI IO request + * @hd: Pointer to the MPT_SCSI_HOST 
instance + * @req_idx: Index of the SCSI IO request frame. + * + * Called if SG chain buffer allocation fails and mptscsih callbacks. + * No return. + */ +static void +mptscsih_freeChainBuffers(MPT_SCSI_HOST *hd, int req_idx) +{ + MPT_FRAME_HDR *chain = NULL; + unsigned long flags; + int chain_idx; + int next; + + /* Get the first chain index and reset + * tracker state. + */ + chain_idx = hd->ReqToChain[req_idx]; + hd->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN; + + while (chain_idx != MPT_HOST_NO_CHAIN) { + + /* Save the next chain buffer index */ + next = hd->ChainToChain[chain_idx]; + + /* Free this chain buffer and reset + * tracker + */ + hd->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN; + + chain = (MPT_FRAME_HDR *) (hd->ChainBuffer + + (chain_idx * hd->ioc->req_sz)); + //spin_lock_irqsave(&hd->FreeChainQlock, flags); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->FreeChainQ.head, + &chain->u.frame.linkage, MPT_FRAME_HDR); + //spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + dmfprintk((MYIOC_s_INFO_FMT "FreeChainBuffers (index %d)\n", + hd->ioc->name, chain_idx)); + + /* handle next */ + chain_idx = next; + } + return; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_abort - Abort linux Scsi_Cmnd routine, new_eh variant - * @SCpnt: Pointer to Scsi_Cmnd structure, IO to be aborted +/* + * Reset Handling + */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_TMHandler - Generic handler for SCSI Task Management. + * Fall through to mpt_HardResetHandler if: not operational, too many + * failed TM requests or handshake failure. 
* - * (linux Scsi_Host_Template.eh_abort_handler routine) + * @ioc: Pointer to MPT_ADAPTER structure + * @type: Task Management type + * @target: Logical Target ID for reset (if appropriate) + * @lun: Logical Unit for reset (if appropriate) + * @ctx2abort: Context for the task to be aborted (if appropriate) + * @sleepFlag: If set, use udelay instead of schedule in handshake code. * - * Returns SUCCESS or FAILED. + * Remark: Currently invoked from a non-interrupt thread (_bh). + * + * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC + * will be active. + * + * Returns 0 for SUCCESS or -1 if FAILED. */ -int -mptscsih_abort(Scsi_Cmnd * SCpnt) +static int +mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; - MPT_SCSI_HOST *hd; - u32 *msg; - u32 ctx2abort; - int i; + MPT_ADAPTER *ioc = NULL; + int rc = -1; + int doTask = 1; + u32 ioc_raw_state; unsigned long flags; - printk(KERN_WARNING MYNAM ": Attempting _ABORT SCSI IO (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + /* If FW is being reloaded currently, return success to + * the calling function. + */ + if (!hd) + return 0; - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + ioc = hd->ioc; + dtmprintk((MYIOC_s_INFO_FMT "TMHandler Entered!\n", ioc->name)); + + if (ioc == NULL) { + printk(KERN_ERR MYNAM " TMHandler" " NULL ioc!\n"); + return 0; + } + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) { + spin_unlock_irqrestore(&ioc->diagLock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->diagLock, flags); + + /* Do not do a Task Management if there are + * too many failed TMs on this adapter. + */ + if (hd->numTMrequests > MPT_HOST_TOO_MANY_TM) + doTask = 0; + + /* Is operational? 
+ */ + ioc_raw_state = mpt_GetIocState(hd->ioc, 0); + +#ifdef MPT_DEBUG_RESET + if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { + printk(MYIOC_s_WARN_FMT + "TM Handler: IOC Not operational! state 0x%x Calling HardResetHandler\n", + hd->ioc->name, ioc_raw_state); + } +#endif + + if (doTask && ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) + && !(ioc_raw_state & MPI_DOORBELL_ACTIVE)) { + + /* Isse the Task Mgmt request. + */ + rc = mptscsih_IssueTaskMgmt(hd, type, target, lun, ctx2abort, sleepFlag); + if (rc) { + printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", hd->ioc->name); + } else { + printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt Successful!\n", hd->ioc->name); + } + } +#ifdef DROP_TEST + numTMrequested++; + if (numTMrequested > 5) { + rc = 0; /* set to 1 to force a hard reset */ + numTMrequested = 0; + } +#endif + + if (rc) { + dtmprintk((MYIOC_s_INFO_FMT "Falling through to HardReset! \n", + hd->ioc->name)); + rc = mpt_HardResetHandler(hd->ioc, sleepFlag); + } + + dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); +#ifndef MPT_SCSI_USE_NEW_EH + dtmprintk((MYIOC_s_INFO_FMT "TMHandler: _bh_handler state (%d) taskQ count (%d)\n", + ioc->name, mytaskQ_bh_active, hd->taskQcnt)); +#endif + + return rc; +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_IssueTaskMgmt - Generic send Task Management function. + * @hd: Pointer to MPT_SCSI_HOST structure + * @type: Task Management type + * @target: Logical Target ID for reset (if appropriate) + * @lun: Logical Unit for reset (if appropriate) + * @ctx2abort: Context for the task to be aborted (if appropriate) + * @sleepFlag: If set, use udelay instead of schedule in handshake code. + * + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). + * + * Not all fields are meaningfull for all task types. 
+ * + * Returns 0 for SUCCESS, -999 for "no msg frames", + * else other non-zero value returned. + */ +static int +mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag) +{ + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + int ii; + int retval = 0; + + /* Return Fail to calling function if no message frames available. + */ if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return FAILED; + dtmprintk((MYIOC_s_WARN_FMT "IssueTaskMgmt, no msg frames!!\n", + hd->ioc->name)); + //return FAILED; + return -999; } + dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n", + hd->ioc->name, mf)); + /* Format the Request + */ pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32 *) mf; - - pScsiTm->TargetID = SCpnt->target; + pScsiTm->TargetID = target; pScsiTm->Bus = hd->port; pScsiTm->ChainOffset = 0; pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + pScsiTm->TaskType = type; pScsiTm->Reserved1 = 0; pScsiTm->MsgFlags = 0; - for (i = 0; i < 8; i++) { - u8 val = 0; - if (i == 1) - val = SCpnt->lun; - pScsiTm->LUN[i] = val; + for (ii= 0; ii < 8; ii++) { + pScsiTm->LUN[ii] = 0; + } + pScsiTm->LUN[1] = lun; + + for (ii=0; ii < 7; ii++) + pScsiTm->Reserved2[ii] = 0; + + pScsiTm->TaskMsgContext = ctx2abort; + dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt, ctx2abort (0x%08x), type (%d)\n", + hd->ioc->name, ctx2abort, type)); + + /* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake + mpt_put_msg_frame(hd->ioc->id, mf); + * Save the MF pointer in case the request times out. 
+ */ + hd->tmPtr = mf; + hd->numTMrequests++; + hd->TMtimer.expires = jiffies + HZ*20; /* 20 seconds */ + add_timer(&hd->TMtimer); + + if ((retval = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, + sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, sleepFlag)) + != 0) { + dtmprintk((MYIOC_s_WARN_FMT "_send_handshake FAILED!" + " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, hd->ioc, mf)); + hd->numTMrequests--; + hd->tmPtr = NULL; + del_timer(&hd->TMtimer); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + return ii; + } + + return retval; +} + +#ifdef MPT_SCSI_USE_NEW_EH /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_abort - Abort linux Scsi_Cmnd routine, new_eh variant + * @SCpnt: Pointer to Scsi_Cmnd structure, IO to be aborted + * + * (linux Scsi_Host_Template.eh_abort_handler routine) + * + * Returns SUCCESS or FAILED. + */ +int +mptscsih_abort(Scsi_Cmnd * SCpnt) +{ + MPT_SCSI_HOST *hd; + MPT_FRAME_HDR *mf; + unsigned long flags; + u32 ctx2abort; + int scpnt_idx; + u8 type; + + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO (=%p)\n", SCpnt); + printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } + + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + /* If this command is pended, then timeout/hang occurred + * during DV. 
Post command and flush pending Q + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } /* Most important! Set TaskMsgContext to SCpnt's MsgContext! * (the IO to be ABORT'd) @@ -1329,37 +2608,63 @@ * swap it here either. It is an opaque cookie to * the controller, so it does not matter. -DaveM */ - ctx2abort = SCPNT_TO_MSGCTX(SCpnt); - if (ctx2abort == -1) { - printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#2) for SCpnt=%p\n", SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - } else { - dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort)); - pScsiTm->TaskMsgContext = ctx2abort; + mf = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx); + ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext; + /* This thread will not exit until tmPending is cleared + * FIXME - must ensure single threaded....DV conflict possible + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (hd->is_spi) + type = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else { + type = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + hd->abortSCpnt = SCpnt; + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO! 
(sc=%p)\n", SCpnt); + } - /* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); - */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[2] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); + if (mptscsih_TMHandler(hd, type, + SCpnt->target, SCpnt->lun, ctx2abort, CAN_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); + + /* If command not found, do not do callback, + * just return failed. CHECKME + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_unlock_irqrestore(sc->host->host_lock, flags); } + + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + } + + + /* Spin on tmPending until we get the interrupt for this TM request. 
+ */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } - //return SUCCESS; return FAILED; } @@ -1375,63 +2680,95 @@ int mptscsih_dev_reset(Scsi_Cmnd * SCpnt) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; MPT_SCSI_HOST *hd; - u32 *msg; - int i; + MPT_FRAME_HDR *mf; unsigned long flags; + int scpnt_idx; + u8 type; printk(KERN_WARNING MYNAM ": Attempting _TARGET_RESET (%p)\n", SCpnt); printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } - if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - return FAILED; + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32*)mf; + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. 
+ */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (hd->is_spi) + type = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else { + type = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + printk(KERN_WARNING MYNAM ": Attempting Target Reset! (sc=%p)\n", SCpnt); + } - pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + SCpnt->target, 0, 0, CAN_SLEEP) < 0) { + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); - /* _TARGET_RESET goes to LUN 0 always! */ - for (i = 0; i < 8; i++) - pScsiTm->LUN[i] = 0; - - /* Control: No data direction, set task mgmt bit? */ - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + /* If command not found, do not do callback, + * just returned failed. CHECKME. + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + } - pScsiTm->TaskMsgContext = cpu_to_le32(0); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + } -/* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); -*/ -/* FIXME! Check return status! 
*/ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[3] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } //return SUCCESS; @@ -1450,68 +2787,96 @@ int mptscsih_bus_reset(Scsi_Cmnd * SCpnt) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; MPT_SCSI_HOST *hd; - u32 *msg; - int i; + MPT_FRAME_HDR *mf; unsigned long flags; + int scpnt_idx; printk(KERN_WARNING MYNAM ": Attempting _BUS_RESET (%p)\n", SCpnt); printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } - if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. 
+ */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - return FAILED; + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32 *) mf; + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + 0, 0, 0, CAN_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); - for (i = 0; i < 8; i++) - pScsiTm->LUN[i] = 0; + /* If command not found, do not do callback, + * just returned failed. CHECKME. + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + } - /* Control: No data direction, set task mgmt bit? 
*/ - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - pScsiTm->TaskMsgContext = cpu_to_le32(0); + return FAILED; + } -/* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); -*/ -/* FIXME! Check return status! */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[4] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } return SUCCESS; } -#if 0 /* { */ +#if 0 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_host_reset - Perform a SCSI host adapter RESET! @@ -1523,11 +2888,61 @@ * Returns SUCCESS or FAILED. 
*/ int -mptscsih_host_reset(Scsi_Cmnd * SCpnt) +mptscsih_host_reset(Scsi_Cmnd *SCpnt) { - return FAILED; + MPT_SCSI_HOST *hd; + MPT_FRAME_HDR *mf; + + printk(KERN_WARNING MYNAM ": Attempting HOST_RESET (%p)\n", SCpnt); + printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } + + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } + + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0) { + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); // sjr-added + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); // sjr-added + return FAILED; + } + + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + } + + return SUCCESS; } -#endif /* } */ +#endif #else /* MPT_SCSI old EH stuff... 
*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1546,57 +2961,118 @@ MPT_FRAME_HDR *mf; struct tq_struct *ptaskfoo; unsigned long flags; + int scpnt_idx; - printk(KERN_WARNING MYNAM ": Scheduling _ABORT SCSI IO (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + printk(KERN_WARNING MYNAM ": OldAbort scheduling ABORT SCSI IO (sc=%p)\n", SCpnt); + printk(KERN_WARNING " IOs outstanding = %d\n", atomic_read(&queue_depth)); if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { - SCpnt->result = DID_ABORT << 16; + printk(KERN_WARNING " WARNING - OldAbort, NULL hostdata ptr!!\n"); + SCpnt->result = DID_ERROR << 16; + SCpnt->scsi_done(SCpnt); + return SCSI_ABORT_NOT_RUNNING; + } + + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. + * If found in doneQ, delete from Q. + * Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; SCpnt->scsi_done(SCpnt); return SCSI_ABORT_SUCCESS; + } else { + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } } /* * Check to see if there's already an ABORT queued for this guy. */ - mf = search_taskQ(0,SCpnt,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + mf = search_taskQ(0, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); if (mf != NULL) { + dtmprintk((MYIOC_s_INFO_FMT "OldAbort:Abort Task PENDING cmd (%p) taskQ depth (%d)\n", + hd->ioc->name, SCpnt, hd->taskQcnt)); + return SCSI_ABORT_PENDING; + } + + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + /* If IOC is reloading FW, return PENDING. 
+ */ + spin_lock_irqsave(&hd->ioc->diagLock, flags); + if (hd->ioc->diagPending) { + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); return SCSI_ABORT_PENDING; } + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + /* If there are no message frames what should we do? + */ if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return SCSI_ABORT_BUSY; + printk((KERN_WARNING " WARNING - OldAbort, no msg frames!!\n")); + /* We are out of message frames! + * Call the reset handler to do a FW reload. + */ + printk((KERN_WARNING " Reloading Firmware!!\n")); + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING " Firmware Reload FAILED!!\n")); + } + return SCSI_ABORT_PENDING; } /* - * Add ourselves to (end of) mpt_scsih_taskQ. + * Add ourselves to (end of) taskQ . * Check to see if our _bh is running. If NOT, schedule it. */ - dslprintk((KERN_INFO MYNAM ": spinlock#2\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - Q_ADD_TAIL(&mpt_scsih_taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); - mpt_scsih_taskQ_cnt++; - /* Yikes - linkage! */ -/* SCpnt->host_scribble = (unsigned char *)mf; */ - mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); + hd->taskQcnt++; + atomic_inc(&mpt_taskQdepth); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + spin_lock_irqsave(&mytaskQ_lock, flags); + + /* Save the original SCpnt mf pointer + */ + SCpnt->host_scribble = (u8 *) MPT_INDEX_2_MFPTR (hd->ioc, scpnt_idx); + + /* For the time being, force bus reset on any abort + * requests for the 1030 FW. + */ + if (hd->is_spi) + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mf->u.frame.linkage.argp1 = SCpnt; - if (! 
mpt_scsih_taskQ_bh_active) { - mpt_scsih_taskQ_bh_active = 1; + mf->u.frame.linkage.argp2 = (void *) hd; + + dtmprintk((MYIOC_s_INFO_FMT "OldAbort:_bh_handler state (%d) taskQ count (%d)\n", + hd->ioc->name, mytaskQ_bh_active, hd->taskQcnt)); + + if (! mytaskQ_bh_active) { + mytaskQ_bh_active = 1; /* * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. */ - ptaskfoo = (struct tq_struct *) ((u8*)mf + hd->ioc->req_sz - sizeof(*ptaskfoo)); + ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; SCHEDULE_TASK(ptaskfoo); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&mytaskQ_lock, flags); return SCSI_ABORT_PENDING; } @@ -1618,9 +3094,10 @@ MPT_FRAME_HDR *mf; struct tq_struct *ptaskfoo; unsigned long flags; + int scpnt_idx; - printk(KERN_WARNING MYNAM ": Scheduling _BUS_RESET (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + printk(KERN_WARNING MYNAM ": OldReset scheduling BUS_RESET (sc=%p)\n", SCpnt); + printk(KERN_WARNING " IOs outstanding = %d\n", atomic_read(&queue_depth)); if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { SCpnt->result = DID_RESET << 16; @@ -1628,48 +3105,102 @@ return SCSI_RESET_SUCCESS; } + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. + * If found in doneQ, delete from Q. + * Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + SCpnt->scsi_done(SCpnt); + return SCSI_RESET_SUCCESS; + } else { + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. 
+ */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } + } + + /* + * Check to see if there's an ABORT_TASK queued for this guy. + * If so, delete. + */ + search_taskQ(1, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + /* * Check to see if there's already a BUS_RESET queued for this guy. */ - mf = search_taskQ(0,SCpnt,MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS); + mf = search_taskQ(0, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS); if (mf != NULL) { + dtmprintk((MYIOC_s_INFO_FMT "OldReset:Reset Task PENDING cmd (%p) taskQ depth (%d)\n", + hd->ioc->name, SCpnt, hd->taskQcnt)); return SCSI_RESET_PENDING; } + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + /* If IOC is reloading FW, return PENDING. + */ + spin_lock_irqsave(&hd->ioc->diagLock, flags); + if (hd->ioc->diagPending) { + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + return SCSI_RESET_PENDING; + } + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return SCSI_RESET_PUNT; + /* We are out of message frames! + * Call the reset handler to do a FW reload. + */ + printk((KERN_WARNING " Reloading Firmware!!\n")); + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING " Firmware Reload FAILED!!\n")); + } + return SCSI_RESET_PENDING; } /* - * Add ourselves to (end of) mpt_scsih_taskQ. + * Add ourselves to (end of) taskQ. * Check to see if our _bh is running. If NOT, schedule it. */ - dslprintk((KERN_INFO MYNAM ": spinlock#3\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - Q_ADD_TAIL(&mpt_scsih_taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); - mpt_scsih_taskQ_cnt++; - /* Yikes - linkage! 
*/ -/* SCpnt->host_scribble = (unsigned char *)mf; */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); + hd->taskQcnt++; + atomic_inc(&mpt_taskQdepth); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + spin_lock_irqsave(&mytaskQ_lock, flags); + + /* Save the original SCpnt mf pointer + */ + SCpnt->host_scribble = (u8 *) MPT_INDEX_2_MFPTR (hd->ioc, scpnt_idx); + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; mf->u.frame.linkage.argp1 = SCpnt; - if (! mpt_scsih_taskQ_bh_active) { - mpt_scsih_taskQ_bh_active = 1; + mf->u.frame.linkage.argp2 = (void *) hd; + + dtmprintk((MYIOC_s_INFO_FMT "OldReset: _bh_handler state (%d) taskQ count (%d)\n", + hd->ioc->name, mytaskQ_bh_active, hd->taskQcnt)); + + if (! mytaskQ_bh_active) { + mytaskQ_bh_active = 1; /* * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. */ - ptaskfoo = (struct tq_struct *) ((u8*)mf + hd->ioc->req_sz - sizeof(*ptaskfoo)); + ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; SCHEDULE_TASK(ptaskfoo); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - + spin_unlock_irqrestore(&mytaskQ_lock, flags); return SCSI_RESET_PENDING; } @@ -1686,147 +3217,171 @@ void mptscsih_taskmgmt_bh(void *sc) { + MPT_ADAPTER *ioc; Scsi_Cmnd *SCpnt; - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; + MPT_FRAME_HDR *mf = NULL; MPT_SCSI_HOST *hd; u32 ctx2abort = 0; - int i; unsigned long flags; + int scpnt_idx; + int did; u8 task_type; - dslprintk((KERN_INFO MYNAM ": spinlock#4\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_taskQ_bh_active = 1; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_lock_irqsave(&mytaskQ_lock, flags); + mytaskQ_bh_active = 1; + spin_unlock_irqrestore(&mytaskQ_lock, flags); - while (1) { - current->state = TASK_INTERRUPTIBLE; + do { + 
set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/4); + did = 0; - /* - * We MUST remove item from taskQ *before* we format the - * frame as a SCSITaskMgmt request and send it down to the IOC. - */ - dslprintk((KERN_INFO MYNAM ": spinlock#5\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - if (Q_IS_EMPTY(&mpt_scsih_taskQ)) { - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - break; - } - mf = mpt_scsih_taskQ.head; - Q_DEL_ITEM(&mf->u.frame.linkage); - mpt_scsih_taskQ_cnt--; - mpt_scsih_active_taskmgmt_mf = mf; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - - SCpnt = (Scsi_Cmnd*)mf->u.frame.linkage.argp1; - if (SCpnt == NULL) { - printk(KERN_ERR MYNAM ": ERROR - TaskMgmt has NULL SCpnt! (%p:%p)\n", mf, SCpnt); - continue; - } - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; - pScsiTm = (SCSITaskMgmt_t *) mf; + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + if (ioc->sh) { + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd == NULL) { + printk(KERN_ERR MYNAM + ": ERROR - TaskMgmt NULL SCSI Host!" + "(ioc=%p, sh=%p hd=%p)\n", + ioc, ioc->sh, hd); + continue; + } - for (i = 0; i < 8; i++) { - pScsiTm->LUN[i] = 0; - } + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (Q_IS_EMPTY(&hd->taskQ)) { + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - task_type = mf->u.frame.linkage.arg1; - if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - printk(KERN_WARNING MYNAM ": Attempting _ABORT SCSI IO! (mf=%p:sc=%p)\n", - mf, SCpnt); - - /* Most important! Set TaskMsgContext to SCpnt's MsgContext! - * (the IO to be ABORT'd) - * - * NOTE: Since we do not byteswap MsgContext, we do not - * swap it here either. It is an opaque cookie to - * the controller, so it does not matter. 
-DaveM - */ - ctx2abort = SCPNT_TO_MSGCTX(SCpnt); - if (ctx2abort == -1) { - printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#1) for SCpnt=%p\n", SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - continue; - } - pScsiTm->LUN[1] = SCpnt->lun; - } - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) - { - printk(KERN_WARNING MYNAM ": Attempting _BUS_RESET! (against SCSI IO mf=%p:sc=%p)\n", mf, SCpnt); - } -#if 0 - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {} - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET) {} -#endif + /* If we ever find a non-empty queue, + * keep the handler alive + */ + did++; - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + /* tmPending is SMP lock-protected */ + if (hd->tmPending || hd->tmPtr) { + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } + hd->tmPending = 1; - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* Process this request + */ + mf = hd->taskQ.head; + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + SCpnt = (Scsi_Cmnd*)mf->u.frame.linkage.argp1; + if (SCpnt == NULL) { + printk(KERN_ERR MYNAM ": ERROR - TaskMgmt has NULL SCpnt! (mf=%p:sc=%p)\n", + mf, SCpnt); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - pScsiTm->Reserved = 0; - pScsiTm->TaskType = task_type; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + /* Get the ScsiLookup index pointer + * from the SC pointer. 
+ */ + if (!SCpnt->host_scribble || ((MPT_SCSI_HOST *)SCpnt->host->hostdata != hd)) { + /* The command associated with the + * abort/reset request must have + * completed and this is a stale + * request. We are done. + * Free the current MF and continue. + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + scpnt_idx = MFPTR_2_MPT_INDEX(hd->ioc, SCpnt->host_scribble); + if (scpnt_idx != SCPNT_TO_LOOKUP_IDX(SCpnt)) { + /* Error! this should never happen!! + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort)); - pScsiTm->TaskMsgContext = ctx2abort; + task_type = mf->u.frame.linkage.arg1; + ctx2abort = 0; + if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + MPT_FRAME_HDR *SCpntMf; + + /* + * Most important! Set TaskMsgContext to SCpnt's MsgContext! + * (the IO to be ABORT'd) + * + * NOTE: Since we do not byteswap MsgContext, we do not + * swap it here either. It is an opaque cookie to + * the controller, so it does not matter. -DaveM + */ + SCpntMf = (MPT_FRAME_HDR *) SCpnt->host_scribble; + ctx2abort = SCpntMf->u.frame.hwhdr.msgctxu.MsgContext; + + hd->abortSCpnt = SCpnt; + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO! (mf=%p:sc=%p)\n", + mf, SCpnt); + } - /* Control: No data direction, set task mgmt bit? */ + /* The TM handler will allocate a new mf, + * so free the current mf. + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + mf = NULL; - /* - * As of MPI v0.10 this request can NOT be sent (normally) - * via FIFOs. 
So we can't: - * mpt_put_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - * SCSITaskMgmt requests MUST be sent ONLY via - * Doorbell/handshake now. :-( - */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), (u32*) mf)) - != 0) { - printk(KERN_WARNING MYNAM ": WARNING[1] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - } else { - /* Spin-Wait for TaskMgmt complete!!! */ - while (mpt_scsih_active_taskmgmt_mf != NULL) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ/4); + if (mptscsih_TMHandler(hd, task_type, SCpnt->target, SCpnt->lun, ctx2abort, NO_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(KERN_WARNING MYNAM + ": WARNING[1] - IOC error processing TaskMgmt request (sc=%p)\n", SCpnt); + + if (hd->ScsiLookup[scpnt_idx] != NULL) { + atomic_dec(&queue_depth); + SCpnt->result = DID_SOFT_ERROR << 16; + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(SCpnt->host->host_lock, + flags); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + } + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + hd->abortSCpnt = NULL; + } } } - } + if (atomic_read(&mpt_taskQdepth) > 0) + did++; - dslprintk((KERN_INFO MYNAM ": spinlock#6\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_taskQ_bh_active = 0; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + } while ( did ); + + spin_lock_irqsave(&mytaskQ_lock, flags); + mytaskQ_bh_active = 0; + spin_unlock_irqrestore(&mytaskQ_lock, flags); return; } - #endif /* } */ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_taskmgmt_complete - Callback routine, gets registered to - * Fusion MPT base driver + * mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver * @ioc: Pointer to MPT_ADAPTER structure * @mf: Pointer to SCSI task mgmt request frame - * @r: Pointer to SCSI task mgmt reply frame + * @mr: Pointer to SCSI task mgmt reply frame * * This routine is called from mptbase.c::mpt_interrupt() at the completion * of any SCSI task management request. @@ -1836,73 +3391,165 @@ * Returns 1 indicating alloc'd request frame ptr should be freed. */ static int -mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r) +mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { SCSITaskMgmtReply_t *pScsiTmReply; SCSITaskMgmt_t *pScsiTmReq; - u8 tmType; -#ifndef MPT_SCSI_USE_NEW_EH + MPT_SCSI_HOST *hd = NULL; unsigned long flags; -#endif - - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt completed mf=%p, r=%p\n", - mf, r)); + u8 tmType = 0; -#ifndef MPT_SCSI_USE_NEW_EH - dslprintk((KERN_INFO MYNAM ": spinlock#7\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - /* It better be the active one! */ - if (mf != mpt_scsih_active_taskmgmt_mf) { - printk(KERN_ERR MYNAM ": ERROR! Non-active TaskMgmt (=%p) completed!\n", mf); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + dtmprintk((MYIOC_s_INFO_FMT "SCSI TaskMgmt completed (mf=%p,r=%p)\n", + ioc->name, mf, mr)); + if (ioc->sh) { + /* Depending on the thread, a timer is activated for + * the TM request. Delete this timer on completion of TM. + * Decrement count of outstanding TM requests. 
+ */ + hd = (MPT_SCSI_HOST *)ioc->sh->hostdata; + if (hd->tmPtr) { + del_timer(&hd->TMtimer); + } + dtmprintk((MYIOC_s_INFO_FMT "taskQcnt (%d)\n", + ioc->name, hd->taskQcnt)); + } else { + dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt Complete: NULL Scsi Host Ptr\n", + ioc->name)); return 1; } -#ifdef MPT_DEBUG - if ((mf == NULL) || - (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": ERROR! NULL or BAD TaskMgmt ptr (=%p)!\n", mf); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + if (mr == NULL) { + dtmprintk((MYIOC_s_WARN_FMT "ERROR! TaskMgmt Reply: NULL Request %p\n", + ioc->name, mf)); return 1; - } -#endif - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); -#endif - - if (r != NULL) { - pScsiTmReply = (SCSITaskMgmtReply_t*)r; + } else { + pScsiTmReply = (SCSITaskMgmtReply_t*)mr; pScsiTmReq = (SCSITaskMgmt_t*)mf; /* Figure out if this was ABORT_TASK, TARGET_RESET, or BUS_RESET! */ tmType = pScsiTmReq->TaskType; - dprintk((KERN_INFO MYNAM ": TaskType = %d\n", tmType)); - dprintk((KERN_INFO MYNAM ": TerminationCount = %d\n", - le32_to_cpu(pScsiTmReply->TerminationCount))); + dtmprintk((KERN_INFO " TaskType = %d, TerminationCount=%d\n", + tmType, le32_to_cpu(pScsiTmReply->TerminationCount))); /* Error? (anything non-zero?) */ if (*(u32 *)&pScsiTmReply->Reserved2[0]) { - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt (%d) - Oops!\n", tmType)); - dprintk((KERN_INFO MYNAM ": IOCStatus = %04xh\n", - le16_to_cpu(pScsiTmReply->IOCStatus))); - dprintk((KERN_INFO MYNAM ": IOCLogInfo = %08xh\n", + u16 iocstatus; + + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + dtmprintk((KERN_INFO " SCSI TaskMgmt (%d) - Oops!\n", tmType)); + dtmprintk((KERN_INFO " IOCStatus = %04xh\n", iocstatus)); + dtmprintk((KERN_INFO " IOCLogInfo = %08xh\n", le32_to_cpu(pScsiTmReply->IOCLogInfo))); + + /* clear flags and continue. 
+ */ + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + hd->abortSCpnt = NULL; +#ifdef DROP_TEST + if (dropMfPtr) + dropTestBad++; +#endif + /* If an internal command is present + * or the TM failed - reload the FW. + * FC FW may respond FAILED to an ABORT + */ + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { + if ((hd->cmdPtr) || + (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED)) { + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING + " Firmware Reload FAILED!!\n")); + } + } + } } else { - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt (%d) SUCCESS!\n", tmType)); + dtmprintk((KERN_INFO " SCSI TaskMgmt SUCCESS!\n")); + +#ifndef MPT_SCSI_USE_NEW_EH + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { + /* clean taskQ - remove tasks associated with + * completed commands. + */ + clean_taskQ(hd); + } else if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + /* If taskQ contains another request + * for this SCpnt, delete this request. + */ + search_taskQ_for_cmd(hd->abortSCpnt, hd); + } +#endif + hd->numTMrequests--; + hd->abortSCpnt = NULL; + flush_doneQ(hd); + +#ifdef DROP_TEST + if (dropMfPtr) + dropTestOK++; +#endif + } + } + +#ifdef DROP_TEST + { + Scsi_Cmnd *sc; + unsigned long flags; + u16 req_idx; + + /* Free resources for the drop test MF and chain buffers. 
+ */ + if (dropMfPtr) { + req_idx = le16_to_cpu(dropMfPtr->u.frame.hwhdr.msgctxu.fld.req_idx); + sc = hd->ScsiLookup[req_idx]; + if (sc == NULL) { + printk(MYIOC_s_ERR_FMT + "Drop Test: NULL ScsiCmd ptr!\n", + ioc->name); + } else { + sc->host_scribble = NULL; + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) + sc->result = DID_RESET << 16; + else + sc->result = DID_ABORT << 16; + + hd->ScsiLookup[req_idx] = NULL; + atomic_dec(&queue_depth); + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + + mptscsih_freeChainBuffers(hd, req_idx); + mpt_free_msg_frame(ScsiDoneCtx, ioc->id, dropMfPtr); + + printk(MYIOC_s_INFO_FMT + "Free'd Dropped cmd (%p)\n", + hd->ioc->name, sc); + printk(MYIOC_s_INFO_FMT + "mf (%p) reqidx (%4x)\n", + hd->ioc->name, dropMfPtr, + req_idx); + printk(MYIOC_s_INFO_FMT + "Num Tot (%d) Good (%d) Bad (%d) \n", + hd->ioc->name, dropTestNum, + dropTestOK, dropTestBad); + } + dropMfPtr = NULL; } } +#endif #ifndef MPT_SCSI_USE_NEW_EH /* * Signal to _bh thread that we finished. + * This IOC can now process another TM command. */ - dslprintk((KERN_INFO MYNAM ": spinlock#8\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + dtmprintk((MYIOC_s_INFO_FMT "taskmgmt_complete: (=%p) done! Num Failed(%d) Task Count (%d)\n", + ioc->name, mf, hd->numTMrequests, hd->taskQcnt)); #endif + hd->tmPtr = NULL; + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); return 1; } @@ -1930,6 +3577,45 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* + * OS entry point to adjust the queue_depths on a per-device basis. + * Called once per device the bus scan. Use it to force the queue_depth + * member to 1 if a device does not support Q tags. 
+ */ +void +mptscsih_select_queue_depths(struct Scsi_Host *sh, Scsi_Device *sdList) +{ + struct scsi_device *device; + VirtDevice *pTarget; + MPT_SCSI_HOST *hd; + int ii, max; + + for (device = sdList; device; device = device->next) { + + if (device->host != sh) + continue; + + hd = (MPT_SCSI_HOST *) sh->hostdata; + if (!hd) + continue; + + if (hd->Targets) { + if (hd->is_spi) + max = MPT_MAX_SCSI_DEVICES; + else + max = MPT_MAX_FC_DEVICES; + + for (ii=0; ii < max; ii++) { + pTarget = hd->Targets[ii]; + if (pTarget && !(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) { + device->queue_depth = 1; + } + } + } + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * Private routines... */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1956,6 +3642,7 @@ case REASSIGN_BLOCKS: case PERSISTENT_RESERVE_OUT: case 0xea: + case 0xa3: return 1; /* No data transfer commands */ @@ -1980,7 +3667,7 @@ return 0; case RESERVE_10: - if (cmd->cmnd[1] & 0x03) /* RESERSE:{LongID|Extent} (data out phase)? */ + if (cmd->cmnd[1] & 0x03) /* RESERVE:{LongID|Extent} (data out phase)? */ return 1; else return 0; @@ -2000,16 +3687,27 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Utility function to copy sense data from the scsi_cmnd buffer + * to the FC and SCSI target structures. 
+ * + */ static void copy_sense_data(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply) { - MPT_SCSI_DEV *mpt_sdev = NULL; + VirtDevice *target; + SCSIIORequest_t *pReq; u32 sense_count = le32_to_cpu(pScsiReply->SenseCount); - char devFoo[32]; + int index; + char devFoo[96]; IO_Info_t thisIo; - if (sc && sc->device) - mpt_sdev = (MPT_SCSI_DEV*) sc->device->hostdata; + /* Get target structure + */ + pReq = (SCSIIORequest_t *) mf; + index = (int) pReq->TargetID; + target = hd->Targets[index]; + if (hd->is_multipath && sc->device->hostdata) + target = (VirtDevice *) sc->device->hostdata; if (sense_count) { u8 *sense_data; @@ -2017,49 +3715,84 @@ /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * 256)); + sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); - /* Cache SenseData for this SCSI device! */ - if (mpt_sdev) { - memcpy(mpt_sdev->CachedSense.data, sense_data, sense_count); - mpt_sdev->sense_sz = sense_count; + + /* save sense data to the target device + */ + if (target) { + int sz; + + sz = MIN(pReq->SenseBufferLength, sense_count); + if (sz > SCSI_STD_SENSE_BYTES) + sz = SCSI_STD_SENSE_BYTES; + memcpy(target->sense, sense_data, sz); + target->tflags |= MPT_TARGET_FLAGS_VALID_SENSE; } - } else { - dprintk((KERN_INFO MYNAM ": Hmmm... SenseData len=0! (?)\n")); - } + /* Log SMART data (asc = 0x5D, non-IM case only) if required. 
+ */ + if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) { + if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) { + int idx; + MPT_ADAPTER *ioc = hd->ioc; + + idx = ioc->eventContext % ioc->eventLogSize; + ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; + ioc->events[idx].eventContext = ioc->eventContext; + + ioc->events[idx].data[0] = (pReq->LUN[1] << 24) || + (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) || + (pReq->Bus << 8) || pReq->TargetID; + + ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; + + ioc->eventContext++; + } + } - thisIo.cdbPtr = sc->cmnd; - thisIo.sensePtr = sc->sense_buffer; - thisIo.SCSIStatus = pScsiReply->SCSIStatus; - thisIo.DoDisplay = 1; - sprintf(devFoo, "ioc%d,scsi%d:%d", hd->ioc->id, sc->target, sc->lun); - thisIo.DevIDStr = devFoo; + /* Print an error report for the user. + */ + thisIo.cdbPtr = sc->cmnd; + thisIo.sensePtr = sc->sense_buffer; + thisIo.SCSIStatus = pScsiReply->SCSIStatus; + thisIo.DoDisplay = 1; + if (hd->is_multipath) + sprintf(devFoo, "%d:%d:%d \"%s\"", + hd->ioc->id, + pReq->TargetID, + pReq->LUN[1], + target->dev_vol_name); + else + sprintf(devFoo, "%d:%d:%d", hd->ioc->id, sc->target, sc->lun); + thisIo.DevIDStr = devFoo; /* fubar */ - thisIo.dataPtr = NULL; - thisIo.inqPtr = NULL; - if (sc->device) { - thisIo.inqPtr = sc->device->vendor-8; /* FIXME!!! */ + thisIo.dataPtr = NULL; + thisIo.inqPtr = NULL; + if (sc->device) { + thisIo.inqPtr = sc->device->vendor-8; /* FIXME!!! */ + } + (void) mpt_ScsiHost_ErrorReport(&thisIo); + + } else { + dprintk((MYIOC_s_INFO_FMT "Hmmm... SenseData len=0! 
(?)\n", + hd->ioc->name)); } - (void) mpt_ScsiHost_ErrorReport(&thisIo); return; } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static u32 -SCPNT_TO_MSGCTX(Scsi_Cmnd *sc) +SCPNT_TO_LOOKUP_IDX(Scsi_Cmnd *sc) { MPT_SCSI_HOST *hd; - MPT_FRAME_HDR *mf; int i; hd = (MPT_SCSI_HOST *) sc->host->hostdata; for (i = 0; i < hd->ioc->req_depth; i++) { if (hd->ScsiLookup[i] == sc) { - mf = MPT_INDEX_2_MFPTR(hd->ioc, i); - return mf->u.frame.hwhdr.msgctxu.MsgContext; + return i; } } @@ -2075,18 +3808,262 @@ # include "../../scsi/scsi_module.c" #endif +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Search the pendingQ for a command with specific index. + * If found, delete and return mf pointer + * If not found, return NULL + */ +static MPT_FRAME_HDR * +mptscsih_search_pendingQ(MPT_SCSI_HOST *hd, int scpnt_idx) +{ + unsigned long flags; + MPT_DONE_Q *buffer; + MPT_FRAME_HDR *mf = NULL; + MPT_FRAME_HDR *cmdMfPtr = NULL; + + ddvtprintk((MYIOC_s_INFO_FMT ": search_pendingQ called...", hd->ioc->name)); + cmdMfPtr = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx); + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->pendingQ)) { + buffer = hd->pendingQ.head; + do { + mf = (MPT_FRAME_HDR *) buffer->argp; + if (mf == cmdMfPtr) { + Q_DEL_ITEM(buffer); + + /* clear the arg pointer + */ + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + break; + } + mf = NULL; + } while ((buffer = buffer->forw) != (MPT_DONE_Q *) &hd->pendingQ); + } + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + ddvtprintk((" ...return %p\n", mf)); + return mf; +} + +/* Post all commands on the pendingQ to the FW. + * Lock Q when deleting/adding members + * Lock io_request_lock for OS callback. + */ +static void +post_pendingQ_commands(MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf; + MPT_DONE_Q *buffer; + unsigned long flags; + + /* Flush the pendingQ. 
+ */ + ddvtprintk((MYIOC_s_INFO_FMT ": post_pendingQ_commands called\n", hd->ioc->name)); + while (1) { + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (Q_IS_EMPTY(&hd->pendingQ)) { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + break; + } + + buffer = hd->pendingQ.head; + /* Delete from Q + */ + Q_DEL_ITEM(buffer); + + mf = (MPT_FRAME_HDR *) buffer->argp; + if (!mf) { + /* This should never happen */ + printk(MYIOC_s_WARN_FMT "post_pendingQ_commands: mf %p\n", hd->ioc->name, mf); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + continue; + } + + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + ddvtprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (mf=%p)\n", + hd->ioc->name, mf)); + + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } + + return; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { - dprintk((KERN_INFO MYNAM ": IOC %s_reset routed to SCSI host driver!\n", + MPT_SCSI_HOST *hd = NULL; + unsigned long flags; + + dtmprintk((KERN_WARNING MYNAM + ": IOC %s_reset routed to SCSI host driver!\n", reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + /* If a FW reload request arrives after base installed but + * before all scsi hosts have been attached, then an alt_ioc + * may have a NULL sh pointer. + */ + if ((ioc->sh == NULL) || (ioc->sh->hostdata == NULL)) + return 0; + else + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (reset_phase == MPT_IOC_PRE_RESET) { - /* FIXME! Do pre-reset cleanup */ + dtmprintk((MYIOC_s_WARN_FMT "Do Pre-Diag Reset handling\n", + ioc->name)); + + /* Clean Up: + * 1. Set Hard Reset Pending Flag + * All new commands go to doneQ + */ + hd->resetPending = 1; + + /* 2. Flush running commands + * Clean drop test code - if compiled + * Clean ScsiLookup (and associated memory) + * AND clean mytaskQ + */ + + /* 2a. 
Drop Test Command. + */ +#ifdef DROP_TEST + { + Scsi_Cmnd *sc; + unsigned long flags; + u16 req_idx; + + /* Free resources for the drop test MF + * and chain buffers. + */ + if (dropMfPtr) { + req_idx = le16_to_cpu(dropMfPtr->u.frame.hwhdr.msgctxu.fld.req_idx); + sc = hd->ScsiLookup[req_idx]; + if (sc == NULL) { + printk(MYIOC_s_ERR_FMT + "Drop Test: NULL ScsiCmd ptr!\n", + ioc->name); + } else { + sc->host_scribble = NULL; + sc->result = DID_RESET << 16; + hd->ScsiLookup[req_idx] = NULL; + atomic_dec(&queue_depth); + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + } + + mptscsih_freeChainBuffers(hd, req_idx); + mpt_free_msg_frame(ScsiDoneCtx, ioc->id, dropMfPtr); + printk(MYIOC_s_INFO_FMT + "Free'd: mf (%p) reqidx (%4x)\n", + hd->ioc->name, dropMfPtr, + req_idx); + } + dropMfPtr = NULL; + } +#endif + + /* 2b. Reply to OS all known outstanding I/O commands. + */ + mptscsih_flush_running_cmds(hd); + + /* 2c. If there was an internal command that + * has not completed, configuration or io request, + * free these resources. + */ + if (hd->cmdPtr) { + del_timer(&hd->timer); + mpt_free_msg_frame(ScsiScanDvCtx, ioc->id, hd->cmdPtr); + atomic_dec(&queue_depth); + } + + /* 2d. If a task management has not completed, + * free resources associated with this request. + */ + if (hd->tmPtr) { + del_timer(&hd->TMtimer); + mpt_free_msg_frame(ScsiTaskCtx, ioc->id, hd->tmPtr); + } + +#ifndef MPT_SCSI_USE_NEW_EH + /* 2e. Delete all commands on taskQ + * Should be superfluous - as this taskQ should + * be empty. + */ + clean_taskQ(hd); +#endif + dtmprintk((MYIOC_s_WARN_FMT "Pre-Reset handling complete.\n", + ioc->name)); + } else { - /* FIXME! Do post-reset cleanup */ + dtmprintk((MYIOC_s_WARN_FMT "Do Post-Diag Reset handling\n", + ioc->name)); + + /* Once a FW reload begins, all new OS commands are + * redirected to the doneQ w/ a reset status. + * Init all control structures. 
+ */ + + /* ScsiLookup initialization + */ + { + int ii; + for (ii=0; ii < hd->ioc->req_depth; ii++) + hd->ScsiLookup[ii] = NULL; + } + + /* 2. Chain Buffer initialization + */ + mptscsih_initChainBuffers(hd, 0); + + /* 3. tmPtr clear + */ + if (hd->tmPtr) { + hd->tmPtr = NULL; + } + + /* 4. Renegotiate to all devices, if SCSI + */ + if (hd->is_spi) + mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM); + + /* 5. Enable new commands to be posted + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + hd->resetPending = 0; + hd->numTMrequests = 0; + + /* 6. If there was an internal command, + * wake this process up. + */ + if (hd->cmdPtr) { + /* + * Wake up the original calling thread + */ + hd->pLocal = &hd->localReply; + hd->pLocal->completion = MPT_SCANDV_DID_RESET; + scandv_wait_done = 1; + wake_up(&scandv_waitq); + hd->cmdPtr = NULL; + } + + /* 7. Flush doneQ + */ + flush_doneQ(hd); + + dtmprintk((MYIOC_s_WARN_FMT "Post-Reset handling complete.\n", + ioc->name)); } return 1; /* currently means nothing really */ @@ -2096,9 +4073,11 @@ static int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { + MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - dprintk((KERN_INFO MYNAM ": MPT event (=%02Xh) routed to SCSI host driver!\n", event)); + dprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", + ioc->name, event)); switch (event) { case MPI_EVENT_UNIT_ATTENTION: /* 03 */ @@ -2125,12 +4104,64 @@ * CHECKME! Falling thru... 
*/ + case MPI_EVENT_INTEGRATED_RAID: /* 0B */ +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + /* negoNvram set to 0 if DV enabled and to USE_NVRAM if + * if DV disabled + */ + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd->negoNvram == 0) { + ScsiCfgData *pSpi; + Ioc3PhysDisk_t *pPDisk; + int numPDisk; + u8 reason; + u8 physDiskNum; + + reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16; + if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) { + /* New or replaced disk. + * Set DV flag and schedule DV. + */ + pSpi = &ioc->spi_data; + physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24; + if (pSpi->pIocPg3) { + pPDisk = pSpi->pIocPg3->PhysDisk; + numPDisk =pSpi->pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (physDiskNum == pPDisk->PhysDiskNum) { + pSpi->dvStatus[pPDisk->PhysDiskID] = MPT_SCSICFG_NEED_DV; + pSpi->forceDv = MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID)); + break; + } + pPDisk++; + numPDisk--; + } + } + } + } +#endif + +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) + printk("Raid Event RF: "); + { + u32 *m = (u32 *)pEvReply; + int ii; + int n = (int)pEvReply->MsgLength; + for (ii=6; ii < n; ii++) + printk(" %08x", le32_to_cpu(m[ii])); + printk("\n"); + } +#endif + break; + case MPI_EVENT_NONE: /* 00 */ case MPI_EVENT_LOG_DATA: /* 01 */ case MPI_EVENT_STATE_CHANGE: /* 02 */ case MPI_EVENT_EVENT_CHANGE: /* 0A */ default: - dprintk((KERN_INFO MYNAM ": Ignoring event (=%02Xh)\n", event)); + dprintk((KERN_INFO " Ignoring event (=%02Xh)\n", event)); break; } @@ -2149,7 +4180,6 @@ //extern ASCQ_Table_t *mpt_ASCQ_TablePtr; //extern int mpt_ASCQ_TableSz; -/* Lie! */ #define MYNAM "mptscsih" #endif /* } */ @@ -2327,8 +4357,6 @@ * Sense_Key_Specific() - If Sense_Key_Specific_Valid bit is set, * then print additional information via * a call to SDMS_SystemAlert(). 
- * - * Return: nothing */ static void Sense_Key_Specific(IO_Info_t *ioop, char *msg1) { @@ -2463,25 +4491,27 @@ } else if (ASC == 0x29 && (ASCQ < sizeof(asc_29_ascq_NN_strings)/sizeof(char*)-1)) *s1 = asc_29_ascq_NN_strings[ASCQ]; /* - * else { leave all *s[1-4] values pointing to the empty "" string } + * Else { leave all *s[1-4] values pointing to the empty "" string } */ return *s1; } /* - * Need to check ASC here; if it is "special," then - * the ASCQ is variable, and indicates failed component number. - * We must treat the ASCQ as a "don't care" while searching the - * mptscsih_ASCQ_Table[] by masking it off, and then restoring it later - * on when we actually need to identify the failed component. + * Need to check ASC here; if it is "special," then + * the ASCQ is variable, and indicates failed component number. + * We must treat the ASCQ as a "dont care" while searching the + * mptscsih_ASCQ_Table[] by masking it off, and then restoring it later + * on when we actually need to identify the failed component. */ if (SPECIAL_ASCQ(ASC,ASCQ)) ASCQ = 0xFF; - /* OK, now search mptscsih_ASCQ_Table[] for a matching entry */ + /* OK, now search mptscsih_ASCQ_Table[] for a matching entry */ for (idx = 0; mptscsih_ASCQ_TablePtr && idx < mpt_ASCQ_TableSz; idx++) - if ((ASC == mptscsih_ASCQ_TablePtr[idx].ASC) && (ASCQ == mptscsih_ASCQ_TablePtr[idx].ASCQ)) - return (*s1 = mptscsih_ASCQ_TablePtr[idx].Description); + if ((ASC == mptscsih_ASCQ_TablePtr[idx].ASC) && (ASCQ == mptscsih_ASCQ_TablePtr[idx].ASCQ)) { + *s1 = mptscsih_ASCQ_TablePtr[idx].Description; + return *s1; + } if ((ASC >= 0x80) || (ASCQ >= 0x80)) *s1 = ascq_vendor_uniq; @@ -2523,6 +4553,9 @@ * SPINNING UP (02,04/01), * LOGICAL UNIT NOT SUPPORTED (05,25/00), etc. 
*/ + if (sk == SK_NO_SENSE) { + return 0; + } if ( (sk==SK_UNIT_ATTENTION && asc==0x29 && (ascq==0x00 || ascq==0x01)) || (sk==SK_NOT_READY && asc==0x04 && ascq==0x01) || (sk==SK_ILLEGAL_REQUEST && asc==0x25 && ascq==0x00) @@ -2591,6 +4624,2608 @@ PrintF(("%s\n", foo)); return l; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_initTarget - Target, LUN alloc/free functionality. + * @hd: Pointer to MPT_SCSI_HOST structure + * @bus_id: Bus number (?) + * @target_id: SCSI target id + * @lun: SCSI LUN id + * @data: Pointer to data + * @dlen: Number of INQUIRY bytes + * + * NOTE: It's only SAFE to call this routine if data points to + * sane & valid STANDARD INQUIRY data! + * + * Allocate and initialize memory for this target. + * Save inquiry data. + * + * Returns pointer to VirtDevice structure. + */ +static VirtDevice * +mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen) +{ + VirtDevice *vdev; + int sz; + + dprintk((MYIOC_s_INFO_FMT "initTarget (%d,%d,%d) called, hd=%p\n", + hd->ioc->name, bus_id, target_id, lun, hd)); + + if ((vdev = hd->Targets[target_id]) == NULL) { + if ((vdev = kmalloc(sizeof(VirtDevice), GFP_ATOMIC)) == NULL) { + printk(MYIOC_s_ERR_FMT "initTarget kmalloc(%d) FAILED!\n", + hd->ioc->name, (int)sizeof(VirtDevice)); + } else { + memset(vdev, 0, sizeof(VirtDevice)); + rwlock_init(&vdev->VdevLock); + Q_INIT(&vdev->WaitQ, void); + Q_INIT(&vdev->SentQ, void); + Q_INIT(&vdev->DoneQ, void); + vdev->tflags = 0; + vdev->ioc_id = hd->ioc->id; + vdev->target_id = target_id; + vdev->bus_id = bus_id; + + hd->Targets[target_id] = vdev; + dprintk((KERN_INFO " *NEW* Target structure (id %d) @ %p\n", + target_id, vdev)); + } + } + + if (vdev && data) { + if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) { + + /* Copy the inquiry data - if we haven't yet. 
+ */ + sz = MIN(dlen, SCSI_STD_INQUIRY_BYTES); + + memcpy (vdev->inq_data, data, sz); + vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY; + + /* Update the target capabilities + */ + mptscsih_setTargetNegoParms(hd, vdev); + } + + /* Is LUN supported? If so, upper 3 bits will be 0 + * in first byte of inquiry data. + */ + if ((*data & 0xe0) == 0) + vdev->luns |= (1 << lun); + } + + if (vdev) { + if (hd->ioc->spi_data.isRaid & (1 << target_id)) + vdev->raidVolume = 1; + else + vdev->raidVolume = 0; + } + + dprintk((KERN_INFO " target = %p\n", vdev)); + return vdev; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Update the target negotiation parameters based on the + * the Inquiry data, adapter capabilities, and NVRAM settings. + * + */ +void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target) +{ + int id = (int) target->target_id; + int nvram; + char canQ = 0; + u8 width = MPT_NARROW; + u8 factor = MPT_ASYNC; + u8 offset = 0; + u8 version, nfactor; + ScsiCfgData *pspi_data = &hd->ioc->spi_data; + + /* Set flags based on Inquiry data + */ + if (target->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) { + version = target->inq_data[2] & 0x03; + if (version < 2) { + width = 0; + factor = MPT_ULTRA2; + offset = pspi_data->maxSyncOffset; + } else { + if (target->inq_data[7] & 0x20) { + width = 1; + } + + if (target->inq_data[7] & 0x10) { + if (version == 2) + factor = MPT_ULTRA2; + else + factor = MPT_ULTRA320; + + offset = pspi_data->maxSyncOffset; + } else { + factor = MPT_ASYNC; + offset = 0; + } + } + + if (target->inq_data[7] & 0x02) { + canQ = 1; + } + + /* Update tflags based on NVRAM settings. (SCSI only) + */ + if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { + nvram = pspi_data->nvram[id]; + nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8; + + if (width) + width = nvram & MPT_NVRAM_WIDE_DISABLE ? 
0 : 1; + + if (offset > 0) { + /* Ensure factor is set to the + * maximum of: adapter, nvram, inquiry + */ + if (nfactor) { + if (nfactor < pspi_data->minSyncFactor ) + nfactor = pspi_data->minSyncFactor; + + factor = MAX (factor, nfactor); + if (factor == MPT_ASYNC) + offset = 0; + } else { + offset = 0; + factor = MPT_ASYNC; + } + } else { + factor = MPT_ASYNC; + } + } + + /* Make sure data is consistent + */ + if ((!width) && (factor < MPT_ULTRA2)) { + factor = MPT_ULTRA2; + } + + /* Save the data to the target structure. + */ + target->minSyncFactor = factor; + target->maxOffset = offset; + target->maxWidth = width; + if (canQ) { + target->tflags |= MPT_TARGET_FLAGS_Q_YES; + } + + target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO; + + /* Disable all wide (sync) extended messages + * if device is narrow (async). + */ + target->negoFlags = 0; + if (!width) + target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Clear sense valid flag. + */ +static void clear_sense_flag(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq) +{ + VirtDevice *target; + int index = (int) pReq->TargetID; + + if ((target = hd->Targets[index])) { + target->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; + } + + return; +} + +/* + * If DV disabled (negoNvram set to USE_NVARM) or if not LUN 0, return. + * Else set the NEED_DV flag after Read Capacity Issued (disks) + * or Mode Sense (cdroms). Tapes, key off of Inquiry command. 
+ */ +static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq, char *data) +{ + u8 cmd = pReq->CDB[0]; + + if (pReq->LUN[1] != 0) + return; + + if (hd->negoNvram != 0) + return; + + if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE) || + ((cmd == INQUIRY) && ((data[0] & 0x1F) == 0x01))) { + u8 dvStatus = hd->ioc->spi_data.dvStatus[pReq->TargetID]; + if (!(dvStatus & MPT_SCSICFG_DV_DONE)) { + ScsiCfgData *pSpi = &hd->ioc->spi_data; + if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) { + /* Set NEED_DV for all hidden disks + */ + Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk; + int numPDisk = pSpi->pIocPg3->NumPhysDisks; + + while (numPDisk) { + pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID)); + pPDisk++; + numPDisk--; + } + } + pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for visible disk id %d\n", + pReq->TargetID)); + }; + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * SCSI Config Page functionality ... + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_setDevicePage1Flags - add Requested and Configuration fields flags + * based on width, factor and offset parameters. + * @width: bus width + * @factor: sync factor + * @offset: sync offset + * @requestedPtr: pointer to requested values (updated) + * @configurationPtr: pointer to configuration values (updated) + * @flags: flags to block WDTR or SDTR negotiation + * + * Return: None. + * + * Remark: Called by writeSDP1 and _dv_params + */ +static void +mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags) +{ + u8 nowide = flags & MPT_TARGET_NO_NEGO_WIDE; + u8 nosync = flags & MPT_TARGET_NO_NEGO_SYNC; + + *configurationPtr = 0; + *requestedPtr = width ? 
MPI_SCSIDEVPAGE1_RP_WIDE : 0; + *requestedPtr |= (offset << 16) | (factor << 8); + + if (width && offset && !nowide && !nosync) { + if (factor < MPT_ULTRA160) { + *requestedPtr |= (MPI_SCSIDEVPAGE1_RP_IU + MPI_SCSIDEVPAGE1_RP_DT + + MPI_SCSIDEVPAGE1_RP_QAS); + } else if (factor < MPT_ULTRA2) { + *requestedPtr |= MPI_SCSIDEVPAGE1_RP_DT; + } + } + + if (nowide) + *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED; + + if (nosync) + *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED; + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_writeSDP1 - write SCSI Device Page 1 + * @hd: Pointer to a SCSI Host Strucutre + * @portnum: IOC port number + * @target_id: writeSDP1 for single ID + * @flags: MPT_SCSICFG_ALL_IDS, MPT_SCSICFG_USE_NVRAM + * + * Return: -EFAULT if read of config page header fails + * or 0 if success. + * + * Remark: If a target has been found, the settings from the + * target structure are used, else the device is set + * to async/narrow. + * + * Remark: Called during init and after a FW reload. + * Remark: We do not wait for a return, write pages sequentially. 
+ */ +static int +mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags) +{ + MPT_ADAPTER *ioc = hd->ioc; + Config_t *pReq = NULL; + SCSIDevicePage1_t *pData = NULL; + VirtDevice *pTarget = NULL; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + dma_addr_t dataDma; + u16 req_idx; + u32 frameOffset; + u32 requested, configuration, flagsLength; + int ii, nvram; + int id = 0, maxid = 0; + u8 width; + u8 factor; + u8 offset; + u8 bus = 0; + u8 negoFlags; + + if (ioc->spi_data.sdp1length == 0) + return 0; + + if (flags & MPT_SCSICFG_ALL_IDS) { + id = 0; + maxid = ioc->sh->max_id - 1; + } else if (ioc->sh) { + id = target_id; + maxid = MIN(id, ioc->sh->max_id - 1); + } + + for (; id <= maxid; id++) { + if (id == ioc->pfacts[portnum].PortSCSIID) + continue; + + if (flags & MPT_SCSICFG_USE_NVRAM) { + /* Use NVRAM, adapter maximums and target settings. + * Data over-riden by target structure information, if present + */ + width = ioc->spi_data.maxBusWidth; + offset = ioc->spi_data.maxSyncOffset; + factor = ioc->spi_data.minSyncFactor; + if (ioc->spi_data.nvram && (ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + nvram = ioc->spi_data.nvram[id]; + + if (width) + width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; + + if (offset > 0) { + factor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8; + if (factor == 0) { + /* Key for async */ + factor = MPT_ASYNC; + offset = 0; + } else if (factor < ioc->spi_data.minSyncFactor) { + factor = ioc->spi_data.minSyncFactor; + } + } else + factor = MPT_ASYNC; + } + + /* Set the negotiation flags. + */ + negoFlags = 0; + if (!width) + negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } else { + width = 0; + factor = MPT_ASYNC; + offset = 0; + negoFlags = MPT_TARGET_NO_NEGO_SYNC; + } + + /* If id is not a raid volume, get the updated + * transmission settings from the target structure. 
+ */ + if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { + width = pTarget->maxWidth; + factor = pTarget->minSyncFactor; + offset = pTarget->maxOffset; + negoFlags = pTarget->negoFlags; + pTarget = NULL; + } + mptscsih_setDevicePage1Flags(width, factor, offset, + &requested, &configuration, negoFlags); + + + if (negoFlags == (MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC)) + continue; + + /* Get a MF for this command. + */ + if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n", + ioc->name)); + return -EAGAIN; + } + + /* Set the request and the data pointers. + * Request takes: 36 bytes (32 bit SGE) + * SCSI Device Page 1 requires 16 bytes + * 40 + 16 <= size of SCSI IO Request = 56 bytes + * and MF size >= 64 bytes. + * Place data at end of MF. + */ + pReq = (Config_t *)mf; + + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + frameOffset = ioc->req_sz - sizeof(SCSIDevicePage1_t); + + pData = (SCSIDevicePage1_t *)((u8 *) mf + frameOffset); + dataDma = ioc->req_frames_dma + (req_idx * ioc->req_sz) + frameOffset; + + /* Complete the request frame (same for all requests). + */ + pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + pReq->Reserved = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_CONFIG; + pReq->Reserved1[0] = 0; + pReq->Reserved1[1] = 0; + pReq->Reserved1[2] = 0; + pReq->MsgFlags = 0; + for (ii=0; ii < 8; ii++) { + pReq->Reserved2[ii] = 0; + } + pReq->Header.PageVersion = ioc->spi_data.sdp1version; + pReq->Header.PageLength = ioc->spi_data.sdp1length; + pReq->Header.PageNumber = 1; + pReq->Header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pReq->PageAddress = cpu_to_le32(id | (bus << 8 )); + + /* Add a SGE to the config request. 
+ */
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | ioc->spi_data.sdp1length * 4;
+
+ psge = (MptSge_t *) &pReq->PageBufferSGE;
+ psge->FlagsLength = cpu_to_le32(flagsLength);
+ cpu_to_leXX(dataDma, psge->Address);
+
+ /* Set up the common data portion
+ */
+ pData->Header.PageVersion = pReq->Header.PageVersion;
+ pData->Header.PageLength = pReq->Header.PageLength;
+ pData->Header.PageNumber = pReq->Header.PageNumber;
+ pData->Header.PageType = pReq->Header.PageType;
+ pData->RequestedParameters = cpu_to_le32(requested);
+ pData->Reserved = 0;
+ pData->Configuration = cpu_to_le32(configuration);
+
+ dprintk((MYIOC_s_INFO_FMT
+ "write SDP1: id %d pgaddr 0x%x req 0x%x config 0x%x\n",
+ ioc->name, id, (id | (bus<<8)),
+ requested, configuration));
+
+ mptscsih_put_msgframe(ScsiDoneCtx, ioc->id, mf);
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptscsih_taskmgmt_timeout - Call back for timeout on a
+ * task management request.
+ * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
+ *
+ */
+static void mptscsih_taskmgmt_timeout(unsigned long data)
+{
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
+
+ dprintk((MYIOC_s_WARN_FMT "TM request timed out!\n", hd->ioc->name));
+ /* Delete the timer that triggered this callback.
+ * Remark: del_timer checks to make sure timer is active
+ * before deleting.
+ */
+ del_timer(&hd->TMtimer);
+
+ /* Call the reset handler. Already had a TM request
+ * timeout - so issue a diagnostic reset
+ */
+ if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) {
+ printk((KERN_WARNING " Firmware Reload FAILED!!\n"));
+ }
+
+ return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Bus Scan and Domain Validation functionality ...
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_scandv_complete - Scan and DV callback routine registered
+ * to Fusion MPT (base) driver. 
+ * + * @ioc: Pointer to MPT_ADAPTER structure + * @mf: Pointer to original MPT request frame + * @mr: Pointer to MPT reply frame (NULL if TurboReply) + * + * This routine is called from mpt.c::mpt_interrupt() at the completion + * of any SCSI IO request. + * This routine is registered with the Fusion MPT (base) driver at driver + * load/init time via the mpt_register() API call. + * + * Returns 1 indicating alloc'd request frame ptr should be freed. + * + * Remark: Sets a completion code and (possibly) saves sense data + * in the IOC member localReply structure. + * Used ONLY for bus scan, DV and other internal commands. + */ +static int +mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +{ + MPT_SCSI_HOST *hd; + SCSIIORequest_t *pReq; + int completionCode; + u16 req_idx; + + if ((mf == NULL) || + (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { + printk(MYIOC_s_ERR_FMT + "ScanDvComplete, %s req frame ptr! (=%p)\n", + ioc->name, mf?"BAD":"NULL", mf); + goto wakeup; + } + + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + del_timer(&hd->timer); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + hd->ScsiLookup[req_idx] = NULL; + pReq = (SCSIIORequest_t *) mf; + + if (mf != hd->cmdPtr) { + printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p)\n", + hd->ioc->name, mf, hd->cmdPtr); + } + hd->cmdPtr = NULL; + + ddvprintk((MYIOC_s_INFO_FMT "ScanDvComplete (mf=%p,mr=%p)\n", + hd->ioc->name, mf, mr)); + + atomic_dec(&queue_depth); + + hd->pLocal = &hd->localReply; + + /* If target struct exists, clear sense valid flag. 
+ */ + clear_sense_flag(hd, pReq); + + if (mr == NULL) { + completionCode = MPT_SCANDV_GOOD; + } else { + SCSIIOReply_t *pReply; + u16 status; + + pReply = (SCSIIOReply_t *) mr; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + + ddvprintk((KERN_NOTICE " IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh, IOCLogInfo=%08xh\n", + status, pReply->SCSIState, pReply->SCSIStatus, + le32_to_cpu(pReply->IOCLogInfo))); + + switch(status) { + + case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ + completionCode = MPT_SCANDV_SELECTION_TIMEOUT; + break; + + case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ + completionCode = MPT_SCANDV_DID_RESET; + break; + + case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ + case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ + if (pReply->Function == MPI_FUNCTION_CONFIG) { + ConfigReply_t *pr = (ConfigReply_t *)mr; + completionCode = MPT_SCANDV_GOOD; + hd->pLocal->header.PageVersion = pr->Header.PageVersion; + hd->pLocal->header.PageLength = pr->Header.PageLength; + hd->pLocal->header.PageNumber = pr->Header.PageNumber; + hd->pLocal->header.PageType = pr->Header.PageType; + + } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) { + /* If the RAID Volume request is successful, + * return GOOD, else indicate that + * some type of error occurred. 
+ */ + MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; + if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS) + completionCode = MPT_SCANDV_GOOD; + else + completionCode = MPT_SCANDV_SOME_ERROR; + + } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { + VirtDevice *target; + u8 *sense_data; + int sz; + + /* save sense data in global & target structure + */ + completionCode = MPT_SCANDV_SENSE; + hd->pLocal->scsiStatus = pReply->SCSIStatus; + sense_data = ((u8 *)hd->ioc->sense_buf_pool + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + sz = MIN (pReq->SenseBufferLength, + SCSI_STD_SENSE_BYTES); + memcpy(hd->pLocal->sense, sense_data, sz); + + target = hd->Targets[pReq->TargetID]; + if (target) { + memcpy(target->sense, sense_data, sz); + target->tflags + |= MPT_TARGET_FLAGS_VALID_SENSE; + } + + ddvprintk((KERN_NOTICE " Check Condition, sense ptr %p\n", + sense_data)); + } else if (pReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | + MPI_SCSI_STATE_NO_SCSI_STATUS)) { + completionCode = MPT_SCANDV_DID_RESET; + } else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + completionCode = MPT_SCANDV_DID_RESET; + } else { + /* If no error, this will be equivalent + * to MPT_SCANDV_GOOD + */ + completionCode = (int) pReply->SCSIStatus; + } + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completionCode = MPT_SCANDV_DID_RESET; + else + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + default: + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + } /* switch(status) */ + + ddvprintk((KERN_NOTICE " completionCode set to %08xh\n", + completionCode)); + } /* end of address reply case */ + + hd->pLocal->completion = completionCode; + + /* MF and RF are freed in mpt_interrupt + */ +wakeup: + /* Free Chain buffers (will never chain) in scan or dv */ + //mptscsih_freeChainBuffers(hd, req_idx); + + /* + * Wake up the original calling thread + */ + scandv_wait_done = 1; + 
wake_up(&scandv_waitq); + + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_timer_expired - Call back for timer process. + * Used only for dv functionality. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + * + */ +static void mptscsih_timer_expired(unsigned long data) +{ + MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data; +#ifndef MPT_SCSI_USE_NEW_EH + unsigned long flags; +#endif + + + ddvprintk((MYIOC_s_WARN_FMT "Timer Expired! Cmd %p\n", hd->ioc->name, hd->cmdPtr)); + + if (hd->cmdPtr) { + MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr; + + if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) { + /* Desire to issue a task management request here. + * TM requests MUST be single threaded. + * If old eh code and no TM current, issue request. + * If new eh code, do nothing. Wait for OS cmd timeout + * for bus reset. + */ +#ifndef MPT_SCSI_USE_NEW_EH + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + return; + } else + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + 0, 0, 0, NO_SLEEP) < 0) { + printk(MYIOC_s_WARN_FMT "TM FAILED!\n", hd->ioc->name); + } +#else + ddvtprintk((MYIOC_s_NOTE_FMT "DV Cmd Timeout: NoOp\n", hd->ioc->name)); +#endif + } else { + /* Perform a FW reload */ + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", hd->ioc->name); + } + } + } else { + /* This should NEVER happen */ + printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", hd->ioc->name); + } + + /* No more processing. + * TM call will generate an interrupt for SCSI TM Management. + * The FW will reply to all outstanding commands, callback will finish cleanup. + * Hard reset clean-up will free all resources. 
+ */ + ddvprintk((MYIOC_s_WARN_FMT "Timer Expired Complete!\n", hd->ioc->name)); + + return; +} + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_do_raid - Format and Issue a RAID volume request message. + * @hd: Pointer to scsi host structure + * @action: What do be done. + * @id: Logical target id. + * @bus: Target locations bus. + * + * Returns: < 0 on a fatal error + * 0 on success + * + * Remark: Wait to return until reply processed by the ISR. + */ +static int +mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io) +{ + MpiRaidActionRequest_t *pReq; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + int flagsLength; + int in_isr; + + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Internal raid request not allowed in ISR context!\n", + hd->ioc->name)); + return -EPERM; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc->id)) == NULL) { + ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", + hd->ioc->name)); + return -EAGAIN; + } + pReq = (MpiRaidActionRequest_t *)mf; + pReq->Action = action; + pReq->Reserved1 = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_RAID_ACTION; + pReq->VolumeID = io->id; + pReq->VolumeBus = io->bus; + pReq->PhysDiskNum = io->physDiskNum; + pReq->MsgFlags = 0; + pReq->Reserved2 = 0; + pReq->ActionDataWord = 0; /* Reserved for this action */ + //pReq->ActionDataSGE = 0; + + psge = (MptSge_t *) &pReq->ActionDataSGE; + + /* Add a SGE to the config request. 
+ */ + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 0; + + psge->FlagsLength = cpu_to_le32(flagsLength); + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + + ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n", + hd->ioc->name, action, io->id)); + + hd->pLocal = NULL; + hd->timer.expires = jiffies + HZ*2; /* 2 second timeout */ + scandv_wait_done = 0; + + /* Save cmd pointer, for resource free if timeout or + * FW reload occurs + */ + hd->cmdPtr = mf; + + add_timer(&hd->timer); + mptscsih_put_msgframe(ScsiScanDvCtx, hd->ioc->id, mf); + wait_event(scandv_waitq, scandv_wait_done); + + if ((hd->pLocal == NULL) || (hd->pLocal->completion != MPT_SCANDV_GOOD)) + return -1; + + return 0; +} +#endif /* ~MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_do_cmd - Do internal command. + * @hd: MPT_SCSI_HOST pointer + * @io: INTERNAL_CMD pointer. + * + * Issue the specified internally generated command and do command + * specific cleanup. For bus scan / DV only. + * NOTES: If command is Inquiry and status is good, + * initialize a target structure, save the data + * + * Remark: Single threaded access only. + * + * Return: + * < 0 if an illegal command or no resources + * + * 0 if good + * + * > 0 if command complete but some type of completion error. 
+ */ +static int +mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) +{ + MPT_FRAME_HDR *mf; + MptSge_t *mpisge; + SCSIIORequest_t *pScsiReq; + SCSIIORequest_t ReqCopy; + int my_idx, ii, dir; + int rc, cmdTimeout; + int in_isr; + char cmdLen; + char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + char cmd = io->cmd; + + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Internal SCSI IO request not allowed in ISR context!\n", + hd->ioc->name)); + return -EPERM; + } + + + /* Set command specific information + */ + switch (cmd) { + case CMD_Inquiry: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + CDB[4] = io->size; + cmdTimeout = 10; + break; + + case CMD_TestUnitReady: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + cmdTimeout = 10; + break; + + case CMD_StartStopUnit: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + CDB[4] = 1; /*Spin up the disk */ + cmdTimeout = 15; + break; + + case CMD_ReadBuffer: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + if (io->flags & MPT_ICFLAG_ECHO) { + CDB[1] = 0x0A; + } else { + CDB[1] = 0x02; + } + + if (io->flags & MPT_ICFLAG_BUF_CAP) { + CDB[1] |= 0x01; + } + CDB[6] = (io->size >> 16) & 0xFF; + CDB[7] = (io->size >> 8) & 0xFF; + CDB[8] = io->size & 0xFF; + cmdTimeout = 10; + break; + + case CMD_WriteBuffer: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_WRITE; + CDB[0] = cmd; + if (io->flags & MPT_ICFLAG_ECHO) { + CDB[1] = 0x0A; + } else { + CDB[1] = 0x02; + } + CDB[6] = (io->size >> 16) & 0xFF; + CDB[7] = (io->size >> 8) & 0xFF; + CDB[8] = io->size & 0xFF; + cmdTimeout = 10; + break; + + case CMD_Reserve6: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + cmdTimeout = 10; + break; + + case CMD_Release6: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + cmdTimeout = 10; + break; + + case CMD_SynchronizeCache: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; +// CDB[1] = 0x02; /* set immediate bit */ + cmdTimeout = 10; + 
break; + + default: + /* Error Case */ + return -EFAULT; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc->id)) == NULL) { + ddvprintk((MYIOC_s_WARN_FMT "No msg frames!\n", + hd->ioc->name)); + return -EBUSY; + } + + pScsiReq = (SCSIIORequest_t *) mf; + + /* Get the request index */ + my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + ADD_INDEX_LOG(my_idx); /* for debug */ + + if (io->flags & MPT_ICFLAG_PHYS_DISK) { + pScsiReq->TargetID = io->physDiskNum; + pScsiReq->Bus = 0; + pScsiReq->ChainOffset = 0; + pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + } else { + pScsiReq->TargetID = io->id; + pScsiReq->Bus = io->bus; + pScsiReq->ChainOffset = 0; + pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; + } + + pScsiReq->CDBLength = cmdLen; + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->Reserved = 0; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + /* MsgContext set in mpt_get_msg_fram call */ + + for (ii=0; ii < 8; ii++) + pScsiReq->LUN[ii] = 0; + pScsiReq->LUN[1] = io->lun; + + if (io->flags & MPT_ICFLAG_TAGGED_CMD) + pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ); + else + pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); + + for (ii=0; ii < 16; ii++) + pScsiReq->CDB[ii] = CDB[ii]; + + pScsiReq->DataLength = cpu_to_le32(io->size); + pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma + + (my_idx * MPT_SENSE_BUFFER_ALLOC)); + + ddvprintk((MYIOC_s_INFO_FMT "Sending Command 0x%x for (%d:%d:%d)\n", + hd->ioc->name, cmd, io->bus, io->id, io->lun)); + + /* 32 bit SG only */ + mpisge = (MptSge_t *) &pScsiReq->SGL; + + if (dir == MPI_SCSIIO_CONTROL_READ) { + mpisge->FlagsLength = cpu_to_le32( + MPT_SGE_FLAGS_SSIMPLE_READ | io->size); + } else { + mpisge->FlagsLength = cpu_to_le32( + MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size); + } + + /* data_dma defaults to -1 + */ + cpu_to_leXX(io->data_dma, mpisge->Address); + + /* The ISR 
will free the request frame, but we need + * the information to initialize the target. Duplicate. + */ + memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t)); + + /* Issue this command after: + * finish init + * add timer + * Wait until the reply has been received + * ScsiScanDvCtx callback function will + * set hd->pLocal; + * set scandv_wait_done and call wake_up + */ + hd->pLocal = NULL; + hd->timer.expires = jiffies + HZ*cmdTimeout; + scandv_wait_done = 0; + + /* Save cmd pointer, for resource free if timeout or + * FW reload occurs + */ + hd->cmdPtr = mf; + + add_timer(&hd->timer); + mptscsih_put_msgframe(ScsiScanDvCtx, hd->ioc->id, mf); + wait_event(scandv_waitq, scandv_wait_done); + + if (hd->pLocal) { + rc = hd->pLocal->completion; + hd->pLocal->skip = 0; + + /* Always set fatal error codes in some cases. + */ + if (rc == MPT_SCANDV_SELECTION_TIMEOUT) + rc = -ENXIO; + else if (rc == MPT_SCANDV_SOME_ERROR) + rc = -rc; + } else { + rc = -EFAULT; + /* This should never happen. */ + ddvprintk((MYIOC_s_INFO_FMT "_do_cmd: Null pLocal!!!\n", + hd->ioc->name)); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks. + * @hd: Pointer to MPT_SCSI_HOST structure + * @portnum: IOC port number + * + * Uses the ISR, but with special processing. + * MUST be single-threaded. 
+ * + * Return: 0 on completion + */ +static int +mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum) +{ + MPT_ADAPTER *ioc= hd->ioc; + VirtDevice *pTarget = NULL; + SCSIDevicePage1_t *pcfg1Data = NULL; + INTERNAL_CMD iocmd; + CONFIGPARMS cfg; + dma_addr_t cfg1_dma_addr = -1; + ConfigPageHeader_t header1; + int bus = 0; + int id = 0; + int lun = 0; + int hostId = ioc->pfacts[portnum].PortSCSIID; + int max_id; + int requested, configuration, data; + int doConfig = 0; + u8 flags, factor; + + max_id = ioc->sh->max_id - 1; + + /* Following parameters will not change + * in this routine. + */ + iocmd.cmd = CMD_SynchronizeCache; + iocmd.flags = 0; + iocmd.physDiskNum = -1; + iocmd.data = NULL; + iocmd.data_dma = -1; + iocmd.size = 0; + iocmd.rsvd = iocmd.rsvd2 = 0; + + /* No SCSI hosts + */ + if (hd->Targets == NULL) + return 0; + + /* Skip the host + */ + if (id == hostId) + id++; + + /* Write SDP1 for all SCSI devices + * Alloc memory and set up config buffer + */ + if (hd->is_spi) { + if (ioc->spi_data.sdp1length > 0) { + pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev, + ioc->spi_data.sdp1length * 4, &cfg1_dma_addr); + + if (pcfg1Data != NULL) { + doConfig = 1; + header1.PageVersion = ioc->spi_data.sdp1version; + header1.PageLength = ioc->spi_data.sdp1length; + header1.PageNumber = 1; + header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + cfg.timeout = 0; + } + } + } + + /* loop through all devices on this port + */ + while (bus < MPT_MAX_BUS) { + iocmd.bus = bus; + iocmd.id = id; + pTarget = hd->Targets[(int)id]; + + if (doConfig) { + + /* Set the negotiation flags */ + if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { + flags = pTarget->negoFlags; + } else { + flags = 0; + if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + data = hd->ioc->spi_data.nvram[id]; + 
+ if (data & MPT_NVRAM_WIDE_DISABLE) + flags |= MPT_TARGET_NO_NEGO_WIDE; + + factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; + if ((factor == 0) || (factor == MPT_ASYNC)) + flags |= MPT_TARGET_NO_NEGO_SYNC; + } + } + + /* Force to async, narrow */ + mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested, + &configuration, flags); + pcfg1Data->RequestedParameters = le32_to_cpu(requested); + pcfg1Data->Reserved = 0; + pcfg1Data->Configuration = le32_to_cpu(configuration); + cfg.pageAddr = (bus<<8) | id; + mpt_config(hd->ioc, &cfg); + } + + /* If target Ptr NULL or if this target is NOT a disk, skip. + */ + // if (pTarget && ((pTarget->inq_data[0] & 0x1F) == 0)) { + if (pTarget) { + for (lun=0; lun <= MPT_LAST_LUN; lun++) { + /* If LUN present, issue the command + */ + if (pTarget->luns & (1< max_id) { + id = 0; + bus++; + } + } + + if (pcfg1Data) { + pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr); + } + + return 0; +} + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_domainValidation - Top level handler for domain validation. + * @hd: Pointer to MPT_SCSI_HOST structure. + * + * Uses the ISR, but with special processing. + * Called from schedule, should not be in interrupt mode. + * While thread alive, do dv for all devices needing dv + * + * Return: None. + */ +static void +mptscsih_domainValidation(void *arg) +{ + MPT_SCSI_HOST *hd = NULL; + MPT_ADAPTER *ioc = NULL; + unsigned long flags; + int id, maxid, dvStatus, did; + int ii, isPhysDisk; + + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_active = 1; + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + /* For this ioc, loop through all devices and do dv to each device. 
+ * When complete with this ioc, search through the ioc list, and + * for each scsi ioc found, do dv for all devices. Exit when no + * device needs dv. + */ + did = 1; + while (did) { + did = 0; + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + spin_lock_irqsave(&dvtaskQ_lock, flags); + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + + /* DV only to SCSI adapters */ + if ((int)ioc->chip_type <= (int)FC929) + continue; + + /* Make sure everything looks ok */ + if (ioc->sh == NULL) + continue; + + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd == NULL) + continue; + + maxid = MIN (ioc->sh->max_id, MPT_MAX_SCSI_DEVICES); + + for (id = 0; id < maxid; id++) { + spin_lock_irqsave(&dvtaskQ_lock, flags); + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + dvStatus = hd->ioc->spi_data.dvStatus[id]; + + if (dvStatus & MPT_SCSICFG_NEED_DV) { + + hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_PENDING; + hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_NEED_DV; + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + + /* If hidden phys disk, block IO's to all + * raid volumes + * else, process normally + */ + isPhysDisk = 0; + if (ioc->spi_data.pIocPg3) { + /* Search IOC page 3 to determine if + * this is hidden physical disk + */ + Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; + int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (pPDisk->PhysDiskID == id) { + isPhysDisk = 1; + break; + } + pPDisk++; + numPDisk--; + } + } + + if (isPhysDisk) { + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + if (hd->ioc->spi_data.isRaid & (1 << ii)) { + hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING; + } + 
} + } + + mptscsih_doDv(hd, 0, id); + did++; + hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_DONE; + hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_PENDING; + + if (isPhysDisk) { + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + if (hd->ioc->spi_data.isRaid & (1 << ii)) { + hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING; + } + } + } + + /* Post OS IOs that were pended while + * DV running. + */ + post_pendingQ_commands(hd); + } + } + } + } + + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + return; +} + + +#define MPT_GET_NVRAM_VALS 0x01 +#define MPT_UPDATE_MAX 0x02 +#define MPT_SET_MAX 0x04 +#define MPT_SET_MIN 0x08 +#define MPT_FALLBACK 0x10 +#define MPT_SAVE 0x20 + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_doDv - Perform domain validation to a target. + * @hd: Pointer to MPT_SCSI_HOST structure. + * @portnum: IOC port number. + * @target: Physical ID of this target + * + * Uses the ISR, but with special processing. + * MUST be single-threaded. + * Test will exit if target is at async & narrow. + * + * Return: None. 
+ */ +static void +mptscsih_doDv(MPT_SCSI_HOST *hd, int portnum, int id) +{ + MPT_ADAPTER *ioc = hd->ioc; + VirtDevice *pTarget = NULL; + u8 *pbuf1 = NULL; + u8 *pbuf2 = NULL; + dma_addr_t buf1_dma = -1; + dma_addr_t buf2_dma = -1; + ConfigPageHeader_t header1; + SCSIDevicePage1_t *pcfg1Data = NULL; + dma_addr_t cfg1_dma_addr = -1; + ConfigPageHeader_t header0; + SCSIDevicePage0_t *pcfg0Data = NULL; + dma_addr_t cfg0_dma_addr = -1; + DVPARAMETERS dv; + INTERNAL_CMD iocmd; + CONFIGPARMS cfg; + int rc, sz = 0; + int bufsize = 0; + int dataBufSize = 0; + int echoBufSize = 0; + int notDone; + int patt; + int repeat; + char firstPass = 1; + char doFallback = 0; + char readPage0; + char bus, lun; + + if (ioc->spi_data.sdp1length == 0) + return; + + if (ioc->spi_data.sdp0length == 0) + return; + + if (id == ioc->pfacts[portnum].PortSCSIID) + return; + + lun = 0; + bus = 0; + ddvtprintk((MYIOC_s_NOTE_FMT + "DV started: numIOs %d bus=%d, id %d dv @ %p\n", + ioc->name, atomic_read(&queue_depth), bus, id, &dv)); + + /* Prep DV structure + */ + memset (&dv, 0, sizeof(DVPARAMETERS)); + dv.id = id; + + /* Populate tmax with the current maximum + * transfer parameters for this target. + * Exit if narrow and async. + */ + dv.cmd = MPT_GET_NVRAM_VALS; + mptscsih_dv_parms(hd, &dv, NULL); + if ((!dv.max.width) && (!dv.max.offset)) + return; + + /* Prep SCSI IO structure + */ + iocmd.id = id; + iocmd.bus = bus; + iocmd.lun = lun; + iocmd.flags = 0; + iocmd.physDiskNum = -1; + iocmd.rsvd = iocmd.rsvd2 = 0; + + /* Use tagged commands if possible. 
+ */ + pTarget = hd->Targets[id]; + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) + iocmd.flags |= MPT_ICFLAG_TAGGED_CMD; + + /* Prep cfg structure + */ + cfg.pageAddr = (bus<<8) | id; + cfg.hdr = NULL; + + /* Prep SDP0 header + */ + header0.PageVersion = ioc->spi_data.sdp0version; + header0.PageLength = ioc->spi_data.sdp0length; + header0.PageNumber = 0; + header0.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pcfg0Data = (SCSIDevicePage0_t *)pci_alloc_consistent(ioc->pcidev, + header0.PageLength * 4, &cfg0_dma_addr); + if (!pcfg0Data) + return; + + /* Prep SDP1 header + */ + header1.PageVersion = ioc->spi_data.sdp1version; + header1.PageLength = ioc->spi_data.sdp1length; + header1.PageNumber = 1; + header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev, + header1.PageLength * 4, &cfg1_dma_addr); + if (!pcfg1Data) + goto target_done; + + /* Skip this ID? Set cfg.hdr to force config page write + */ + if ((ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID) && + (!(ioc->spi_data.nvram[id] & MPT_NVRAM_ID_SCAN_ENABLE))) { + + ddvprintk((MYIOC_s_NOTE_FMT "DV Skipped: bus, id, lun (%d, %d, %d)\n", + ioc->name, bus, id, lun)); + + dv.cmd = MPT_SET_MAX; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + cfg.hdr = &header1; + goto target_done; + } + + /* Finish iocmd inititialization - hidden or visible disk? 
*/ + if (ioc->spi_data.pIocPg3) { + /* Searc IOC page 3 for matching id + */ + Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; + int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (pPDisk->PhysDiskID == id) { + /* match */ + iocmd.flags |= MPT_ICFLAG_PHYS_DISK; + iocmd.physDiskNum = pPDisk->PhysDiskNum; + + /* Quiesce the IM + */ + if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0) { + ddvprintk((MYIOC_s_ERR_FMT "RAID Queisce FAILED!\n", ioc->name)); + goto target_done; + } + break; + } + pPDisk++; + numPDisk--; + } + } + + /* RAID Volume ID's may double for a physical device. If RAID but + * not a physical ID as well, skip DV. + */ + if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK)) + goto target_done; + + + /* Basic Test. + * Async & Narrow - Inquiry + * Async & Narrow - Inquiry + * Maximum transfer rate - Inquiry + * Compare buffers: + * If compare, test complete. + * If miscompare and first pass, repeat + * If miscompare and not first pass, fall back and repeat + */ + hd->pLocal = NULL; + readPage0 = 0; + sz = SCSI_STD_INQUIRY_BYTES; + pbuf1 = pci_alloc_consistent(ioc->pcidev, sz, &buf1_dma); + pbuf2 = pci_alloc_consistent(ioc->pcidev, sz, &buf2_dma); + if (!pbuf1 || !pbuf2) + goto target_done; + + while (1) { + ddvprintk((MYIOC_s_NOTE_FMT "DV: Start Basic test.\n", ioc->name)); + dv.cmd = MPT_SET_MIN; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + iocmd.cmd = CMD_Inquiry; + iocmd.data_dma = buf1_dma; + iocmd.data = pbuf1; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + /* Another GEM workaround. Check peripheral device type, + * if PROCESSOR, quit DV. 
+ */ + if (((pbuf1[0] & 0x1F) == 0x03) || ((pbuf1[0] & 0x1F) > 0x08)) + goto target_done; + + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + if (doFallback) + dv.cmd = MPT_FALLBACK; + else + dv.cmd = MPT_SET_MAX; + + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + + iocmd.cmd = CMD_Inquiry; + iocmd.data_dma = buf2_dma; + iocmd.data = pbuf2; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + /* Save the return code. + * If this is the first pass, + * read SCSI Device Page 0 + * and update the target max parameters. + */ + rc = hd->pLocal->completion; + doFallback = 0; + if (rc == MPT_SCANDV_GOOD) { + if (!readPage0) { + u32 sdp0_info; + u32 sdp0_nego; + + cfg.hdr = &header0; + cfg.physAddr = cfg0_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.dir = 0; + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + sdp0_info = le32_to_cpu(pcfg0Data->Information) & 0x0E; + sdp0_nego = (le32_to_cpu(pcfg0Data->NegotiatedParameters) & 0xFF00 ) >> 8; + + /* Quantum and Fujitsu workarounds. + * Quantum: PPR U320 -> PPR reply with Ultra2 and wide + * Fujitsu: PPR U320 -> Msg Reject and Ultra2 and wide + * Resetart with a request for U160. + */ + if ((dv.now.factor == MPT_ULTRA320) && (sdp0_nego == MPT_ULTRA2)) { + doFallback = 1; + } else { + dv.cmd = MPT_UPDATE_MAX; + mptscsih_dv_parms(hd, &dv, (void *)pcfg0Data); + /* Update the SCSI device page 1 area + */ + pcfg1Data->RequestedParameters = pcfg0Data->NegotiatedParameters; + readPage0 = 1; + } + } + + /* Quantum workaround. Restart this test will the fallback + * flag set. 
+ */ + if (doFallback == 0) { + if (memcmp(pbuf1, pbuf2, sz) != 0) { + if (!firstPass) + doFallback = 1; + } else + break; /* test complete */ + } + + + } else if ((rc == MPT_SCANDV_DID_RESET) || (rc == MPT_SCANDV_SENSE)) + doFallback = 1; /* set fallback flag */ + else + goto target_done; + + firstPass = 0; + } + } + /* Free pbuf2, but use pbuf1 for + * acquiring the (echo) buffer size. + */ + pci_free_consistent(ioc->pcidev, sz, pbuf2, buf2_dma); + pbuf2 = NULL; + ddvprintk((MYIOC_s_NOTE_FMT "DV: Basic test completed OK.\n", ioc->name)); + + /* Start the Enhanced Test. + * 0) issue TUR to clear out check conditions + * 1) read capacity of echo (regular) buffer + * 2) reserve device + * 3) do write-read-compare data pattern test + * 4) release + * 5) update nego parms to target struct + */ + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + + iocmd.cmd = CMD_TestUnitReady; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + notDone = 1; + while (notDone) { + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + if (hd->pLocal == NULL) + goto target_done; + + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) + notDone = 0; + else if (rc == MPT_SCANDV_SENSE) { + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + ioc->name, skey, asc, ascq)); + + if (skey == SK_UNIT_ATTENTION) + notDone++; /* repeat */ + else if ((skey == SK_NOT_READY) && + (asc == 0x04)&&(ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else if ((skey == SK_NOT_READY) && (asc == 0x3A)) { + /* no medium, try read test anyway */ + notDone = 0; + } else { + /* All other errors are fatal. 
+ */ + ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.", + ioc->name)); + goto target_done; + } + } else + goto target_done; + } + + iocmd.cmd = CMD_ReadBuffer; + iocmd.data_dma = buf1_dma; + iocmd.data = pbuf1; + iocmd.size = 4; + iocmd.flags |= MPT_ICFLAG_BUF_CAP; + + dataBufSize = 0; + echoBufSize = 0; + for (patt = 0; patt < 2; patt++) { + if (patt == 0) + iocmd.flags |= MPT_ICFLAG_ECHO; + else + iocmd.flags &= ~MPT_ICFLAG_ECHO; + + notDone = 1; + while (notDone) { + bufsize = 0; + + /* If not ready after 8 trials, + * give up on this device. + */ + if (notDone > 8) + goto target_done; + + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + ddvprintk(("ReadBuffer Comp Code %d", rc)); + ddvprintk((" buff: %0x %0x %0x %0x\n", + pbuf1[0], pbuf1[1], pbuf1[2], pbuf1[3])); + + if (rc == MPT_SCANDV_GOOD) { + notDone = 0; + if (iocmd.flags & MPT_ICFLAG_ECHO) { + bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3]; + } else { + bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3]; + } + } else if (rc == MPT_SCANDV_SENSE) { + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + ioc->name, skey, asc, ascq)); + if (skey == SK_ILLEGAL_REQUEST) { + notDone = 0; + } else if (skey == SK_UNIT_ATTENTION) { + notDone++; /* repeat */ + } else if ((skey == SK_NOT_READY) && + (asc == 0x04)&&(ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else { + /* All other errors are fatal. 
+ */ + ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.", + ioc->name)); + goto target_done; + } + } else { + /* All other errors are fatal + */ + goto target_done; + } + } + } + + if (iocmd.flags & MPT_ICFLAG_ECHO) + echoBufSize = bufsize; + else + dataBufSize = bufsize; + } + pci_free_consistent(ioc->pcidev, sz, pbuf1, buf1_dma); + pbuf1 = NULL; + sz = 0; + iocmd.flags &= ~MPT_ICFLAG_BUF_CAP; + + /* Use echo buffers if possible, + * Exit if both buffers are 0. + */ + if (echoBufSize > 0) { + iocmd.flags |= MPT_ICFLAG_ECHO; + if (dataBufSize > 0) + bufsize = MIN(echoBufSize, dataBufSize); + else + bufsize = echoBufSize; + } else if (dataBufSize == 0) + goto target_done; + + ddvprintk((MYIOC_s_INFO_FMT "%s Buffer Capacity %d\n", ioc->name, + (iocmd.flags & MPT_ICFLAG_ECHO) ? "Echo" : " ", bufsize)); + + /* Allocate data buffers for write-read-compare test. + */ + sz = MIN(bufsize, 1024); + pbuf1 = pci_alloc_consistent(ioc->pcidev, sz, &buf1_dma); + pbuf2 = pci_alloc_consistent(ioc->pcidev, sz, &buf2_dma); + if (!pbuf1 || !pbuf2) + goto target_done; + + /* --- loop ---- + * On first pass, always issue a reserve. + * On additional loops, only if a reset has occurred. 
+ * iocmd.flags indicates if echo or regular buffer + */ + for (patt = 0; patt < 4; patt++) { + ddvprintk(("Pattern %d\n", patt)); + if ((iocmd.flags & MPT_ICFLAG_RESERVED) && (iocmd.flags & MPT_ICFLAG_DID_RESET)) { + iocmd.cmd = CMD_TestUnitReady; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + iocmd.cmd = CMD_Release6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + ddvprintk(("Release rc %d\n", rc)); + if (rc == MPT_SCANDV_GOOD) + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + else + goto target_done; + } + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + } + iocmd.flags &= ~MPT_ICFLAG_DID_RESET; + + repeat = 5; + while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) { + iocmd.cmd = CMD_Reserve6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) { + iocmd.flags |= MPT_ICFLAG_RESERVED; + } else if (rc == MPT_SCANDV_SENSE) { + /* Wait if coming ready + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "DV: Reserve Failed: ", ioc->name)); + ddvprintk(("SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + skey, asc, ascq)); + + if ((skey == SK_NOT_READY) && (asc == 0x04)&& + (ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else { + ddvprintk((MYIOC_s_INFO_FMT + "DV: Reserved Failed.", ioc->name)); + goto target_done; + } + } else { + ddvprintk((MYIOC_s_INFO_FMT "DV: Reserved Failed.", + ioc->name)); + goto target_done; + } + } + } + + mptscsih_fillbuf(pbuf1, sz, patt, 1); + iocmd.cmd = CMD_WriteBuffer; + iocmd.data_dma = buf1_dma; + iocmd.data = 
pbuf1; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) + ; /* Issue read buffer */ + else if (rc == MPT_SCANDV_DID_RESET) { + /* If using echo buffers, reset to data buffers. + * Else do Fallback and restart + * this test (re-issue reserve + * because of bus reset). + */ + if ((iocmd.flags & MPT_ICFLAG_ECHO) && (dataBufSize >= bufsize)) { + iocmd.flags &= ~MPT_ICFLAG_ECHO; + } else { + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + } + + iocmd.flags |= MPT_ICFLAG_DID_RESET; + patt = -1; + continue; + } else if (rc == MPT_SCANDV_SENSE) { + /* Restart data test if UA, else quit. + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey, + hd->pLocal->sense[12], hd->pLocal->sense[13])); + if (skey == SK_UNIT_ATTENTION) { + patt = -1; + continue; + } else if (skey == SK_ILLEGAL_REQUEST) { + if (iocmd.flags & MPT_ICFLAG_ECHO) { + if (dataBufSize >= bufsize) { + iocmd.flags &= ~MPT_ICFLAG_ECHO; + patt = -1; + continue; + } + } + goto target_done; + } + else + goto target_done; + } else { + /* fatal error */ + goto target_done; + } + } + + iocmd.cmd = CMD_ReadBuffer; + iocmd.data_dma = buf2_dma; + iocmd.data = pbuf2; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) { + /* If buffers compare, + * go to next pattern, + * else, do a fallback and restart + * data transfer test. + */ + if (memcmp (pbuf1, pbuf2, sz) == 0) { + ; /* goto next pattern */ + } else { + /* Miscompare with Echo buffer, go to data buffer, + * if that buffer exists. 
+ * Miscompare with Data buffer, check first 4 bytes, + * some devices return capacity. Exit in this case. + */ + if (iocmd.flags & MPT_ICFLAG_ECHO) { + if (dataBufSize >= bufsize) + iocmd.flags &= ~MPT_ICFLAG_ECHO; + else + goto target_done; + } else { + if (dataBufSize == (pbuf2[1]<<16 | pbuf2[2]<<8 | pbuf2[3])) { + /* Argh. Device returning wrong data. + * Quit DV for this device. + */ + goto target_done; + } + + /* Had an actual miscompare. Slow down.*/ + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + } + + patt = -1; + continue; + } + } else if (rc == MPT_SCANDV_DID_RESET) { + /* Do Fallback and restart + * this test (re-issue reserve + * because of bus reset). + */ + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + + iocmd.flags |= MPT_ICFLAG_DID_RESET; + patt = -1; + continue; + } else if (rc == MPT_SCANDV_SENSE) { + /* Restart data test if UA, else quit. + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey, + hd->pLocal->sense[12], hd->pLocal->sense[13])); + if (skey == SK_UNIT_ATTENTION) { + patt = -1; + continue; + } + else + goto target_done; + } else { + /* fatal error */ + goto target_done; + } + } + + } /* --- end of patt loop ---- */ + +target_done: + if (iocmd.flags & MPT_ICFLAG_RESERVED) { + iocmd.cmd = CMD_Release6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d", + ioc->name, id); + else if (hd->pLocal) { + if (hd->pLocal->completion == MPT_SCANDV_GOOD) + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + } else { + printk(MYIOC_s_INFO_FMT "DV: Release failed. 
id %d", + ioc->name, id); + } + } + + + /* Set if cfg1_dma_addr contents is valid + */ + if (cfg.hdr != NULL) { + dv.cmd = MPT_SAVE; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + /* Save the final negotiated settings to + * SCSI device page 1. + */ + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + mpt_config(hd->ioc, &cfg); + } + + /* If this is a RAID Passthrough, enable internal IOs + */ + if (iocmd.flags & MPT_ICFLAG_PHYS_DISK) { + if (mptscsih_do_raid(hd, MPI_RAID_ACTION_ENABLE_PHYS_IO, &iocmd) < 0) + ddvprintk((MYIOC_s_ERR_FMT "RAID Queisce FAILED!\n", ioc->name)); + } + + /* Done with the DV scan of the current target + */ + if (pcfg0Data) { + pci_free_consistent(ioc->pcidev, header0.PageLength * 4, + pcfg0Data, cfg0_dma_addr); + } + + if (pcfg1Data) { + pci_free_consistent(ioc->pcidev, header1.PageLength * 4, + pcfg1Data, cfg1_dma_addr); + } + + if (pbuf1) { + pci_free_consistent(ioc->pcidev, sz, pbuf1, buf1_dma); + pbuf1 = NULL; + } + + if (pbuf2) { + pci_free_consistent(ioc->pcidev, sz, pbuf2, buf2_dma); + pbuf2 = NULL; + } + + ddvtprintk((MYIOC_s_INFO_FMT "DV Done. IOs outstanding = %d\n", + ioc->name, atomic_read(&queue_depth))); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_dv_parms - perform a variety of operations on the + * parameters used for negotiation. + * @hd: Pointer to a SCSI host. + * @dv: Pointer to a structure that contains the maximum and current + * negotiated parameters. 
+ */ +static void +mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage) +{ + VirtDevice *pTarget = NULL; + SCSIDevicePage0_t *pPage0 = NULL; + SCSIDevicePage1_t *pPage1 = NULL; + int val = 0, data, configuration; + u8 width = 0; + u8 offset = 0; + u8 factor = 0; + u8 negoFlags = 0; + u8 cmd = dv->cmd; + u8 id = dv->id; + + switch (cmd) { + case MPT_GET_NVRAM_VALS: + ddvprintk((MYIOC_s_NOTE_FMT "Getting NVRAM: ", + hd->ioc->name)); + /* Get the NVRAM values and save in tmax + * If not an LVD bus, the adapter minSyncFactor has been + * already throttled back. + */ + if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { + width = pTarget->maxWidth; + offset = pTarget->maxOffset; + factor = pTarget->minSyncFactor; + negoFlags = pTarget->negoFlags; + } else { + if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + data = hd->ioc->spi_data.nvram[id]; + width = data & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; + if ((offset = hd->ioc->spi_data.maxSyncOffset) == 0) + factor = MPT_ASYNC; + else { + factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; + if ((factor == 0) || (factor == MPT_ASYNC)){ + factor = MPT_ASYNC; + offset = 0; + } + } + } else { + width = MPT_NARROW; + offset = 0; + factor = MPT_ASYNC; + } + + /* Set the negotiation flags */ + negoFlags = 0; + if (!width) + negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } + + /* limit by adapter capabilities */ + width = MIN(width, hd->ioc->spi_data.maxBusWidth); + offset = MIN(offset, hd->ioc->spi_data.maxSyncOffset); + factor = MAX(factor, hd->ioc->spi_data.minSyncFactor); + + /* Check Consistency */ + if (offset && (factor < MPT_ULTRA2) && !width) + factor = MPT_ULTRA2; + + dv->max.width = width; + dv->max.offset = offset; + dv->max.factor = factor; + dv->max.flags = negoFlags; + ddvprintk((" width %d, factor %x, offset %x flags %x\n", + width, factor, offset, negoFlags)); + break; 
+ + case MPT_UPDATE_MAX: + ddvprintk((MYIOC_s_NOTE_FMT + "Updating with SDP0 Data: ", hd->ioc->name)); + /* Update tmax values with those from Device Page 0.*/ + pPage0 = (SCSIDevicePage0_t *) pPage; + if (pPage0) { + val = cpu_to_le32(pPage0->NegotiatedParameters); + dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0; + dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16; + dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; + } + + dv->now.width = dv->max.width; + dv->now.offset = dv->max.offset; + dv->now.factor = dv->max.factor; + ddvprintk(("width %d, factor %x, offset %x, flags %x\n", + dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags)); + break; + + case MPT_SET_MAX: + ddvprintk((MYIOC_s_NOTE_FMT "Setting Max: ", + hd->ioc->name)); + /* Set current to the max values. Update the config page.*/ + dv->now.width = dv->max.width; + dv->now.offset = dv->max.offset; + dv->now.factor = dv->max.factor; + dv->now.flags = dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (dv->now.width, dv->now.factor, + dv->now.offset, &val, &configuration, dv->now.flags); + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + + } + + ddvprintk(("width %d, factor %x, offset %x request %x, config %x\n", + dv->now.width, dv->now.factor, dv->now.offset, val, configuration)); + break; + + case MPT_SET_MIN: + ddvprintk((MYIOC_s_NOTE_FMT "Setting Min: ", + hd->ioc->name)); + /* Set page to asynchronous and narrow + * Do not update now, breaks fallback routine. 
*/ + width = MPT_NARROW; + offset = 0; + factor = MPT_ASYNC; + negoFlags = dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (width, factor, + offset, &val, &configuration, negoFlags); + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + } + ddvprintk(("width %d, factor %x, offset %x request %x config %x\n", + dv->now.width, dv->now.factor, + dv->now.offset, val, configuration)); + break; + + case MPT_FALLBACK: + ddvprintk((MYIOC_s_NOTE_FMT + "Fallback: Start: offset %d, factor %x, width %d \n", + hd->ioc->name, dv->now.offset, + dv->now.factor, dv->now.width)); + width = dv->now.width; + offset = dv->now.offset; + factor = dv->now.factor; + if ((offset) && (dv->max.width)) { + if (factor < MPT_ULTRA160) + factor = MPT_ULTRA160; + else if (factor < MPT_ULTRA2) { + factor = MPT_ULTRA2; + width = MPT_WIDE; + } else if ((factor == MPT_ULTRA2) && width) { + factor = MPT_ULTRA2; + width = MPT_NARROW; + } else if (factor < MPT_ULTRA) { + factor = MPT_ULTRA; + width = MPT_WIDE; + } else if ((factor == MPT_ULTRA) && width) { + factor = MPT_ULTRA; + width = MPT_NARROW; + } else if (factor < MPT_FAST) { + factor = MPT_FAST; + width = MPT_WIDE; + } else if ((factor == MPT_FAST) && width) { + factor = MPT_FAST; + width = MPT_NARROW; + } else if (factor < MPT_SCSI) { + factor = MPT_SCSI; + width = MPT_WIDE; + } else if ((factor == MPT_SCSI) && width) { + factor = MPT_SCSI; + width = MPT_NARROW; + } else { + factor = MPT_ASYNC; + offset = 0; + } + + } else if (offset) { + width = MPT_NARROW; + if (factor < MPT_ULTRA) + factor = MPT_ULTRA; + else if (factor < MPT_FAST) + factor = MPT_FAST; + else if (factor < MPT_SCSI) + factor = MPT_SCSI; + else { + factor = MPT_ASYNC; + offset = 0; + } + + } else { + width = MPT_NARROW; + factor = MPT_ASYNC; + } + + dv->now.width = width; + dv->now.offset = offset; + dv->now.factor = factor; + dv->now.flags = 
dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (width, factor, offset, &val, + &configuration, dv->now.flags); + + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + } + + ddvprintk(("Finish: offset %d, factor %x, width %d, request %x config %x\n", + dv->now.offset, dv->now.factor, dv->now.width, val, configuration)); + break; + + case MPT_SAVE: + ddvprintk((MYIOC_s_NOTE_FMT + "Saving to Target structure: ", hd->ioc->name)); + ddvprintk(("offset %d, factor %x, width %d \n", + dv->now.offset, dv->now.factor, dv->now.width)); + + /* Save these values to target structures + * or overwrite nvram (phys disks only). + */ + + if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) { + pTarget->maxWidth = dv->now.width; + pTarget->maxOffset = dv->now.offset; + pTarget->minSyncFactor = dv->now.factor; + } else { + /* Preserv all flags, use + * read-modify-write algorithm + */ + data = hd->ioc->spi_data.nvram[id]; + + if (dv->now.width) + data &= ~MPT_NVRAM_WIDE_DISABLE; + else + data |= MPT_NVRAM_WIDE_DISABLE; + + if (!dv->now.offset) + factor = MPT_ASYNC; + + data &= ~MPT_NVRAM_SYNC_MASK; + data |= (dv->now.factor << MPT_NVRAM_SYNC_SHIFT) & MPT_NVRAM_SYNC_MASK; + + hd->ioc->spi_data.nvram[id] = data; + } + break; + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_fillbuf - fill a buffer with a special data pattern + * cleanup. For bus scan only. + * + * @buffer: Pointer to data buffer to be filled. 
+ * @size: Number of bytes to fill + * @index: Pattern index + * @width: bus width, 0 (8 bits) or 1 (16 bits) + */ +static void +mptscsih_fillbuf(char *buffer, int size, int index, int width) +{ + char *ptr = buffer; + int ii; + char byte; + short val; + + switch (index) { + case 0: + + if (width) { + /* Pattern: 0000 FFFF 0000 FFFF + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x02) + *ptr = 0xFF; + else + *ptr = 0x00; + } + } else { + /* Pattern: 00 FF 00 FF + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x01) + *ptr = 0xFF; + else + *ptr = 0x00; + } + } + break; + + case 1: + if (width) { + /* Pattern: 5555 AAAA 5555 AAAA 5555 + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x02) + *ptr = 0xAA; + else + *ptr = 0x55; + } + } else { + /* Pattern: 55 AA 55 AA 55 + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x01) + *ptr = 0xAA; + else + *ptr = 0x55; + } + } + break; + + case 2: + /* Pattern: 00 01 02 03 04 05 + * ... FE FF 00 01.. + */ + for (ii=0; ii < size; ii++, ptr++) + *ptr = (char) ii; + break; + + case 3: + if (width) { + /* Wide Pattern: FFFE 0001 FFFD 0002 + * ... 4000 DFFF 8000 EFFF + */ + byte = 0; + for (ii=0; ii < size/2; ii++) { + /* Create the base pattern + */ + val = (1 << byte); + /* every 64 (0x40) bytes flip the pattern + * since we fill 2 bytes / iteration, + * test for ii = 0x20 + */ + if (ii & 0x20) + val = ~(val); + + if (ii & 0x01) { + *ptr = (char)( (val & 0xFF00) >> 8); + ptr++; + *ptr = (char)(val & 0xFF); + byte++; + byte &= 0x0F; + } else { + val = ~val; + *ptr = (char)( (val & 0xFF00) >> 8); + ptr++; + *ptr = (char)(val & 0xFF); + } + + ptr++; + } + } else { + /* Narrow Pattern: FE 01 FD 02 FB 04 + * .. 7F 80 01 FE 02 FD ... 
80 7F + */ + byte = 0; + for (ii=0; ii < size; ii++, ptr++) { + /* Base pattern - first 32 bytes + */ + if (ii & 0x01) { + *ptr = (1 << byte); + byte++; + byte &= 0x07; + } else { + *ptr = (char) (~(1 << byte)); + } + + /* Flip the pattern every 32 bytes + */ + if (ii & 0x20) + *ptr = ~(*ptr); + } + } + break; + } +} +#endif /* ~MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Commandline Parsing routines and defines. + * + * insmod format: + * insmod mptscsih mptscsih="width:1 dv:n factor:0x09" + * boot format: + * mptscsih=width:1,dv:n,factor:0x8 + * + */ +#ifdef MODULE +#define ARG_SEP ' ' +#else +#define ARG_SEP ',' +#endif + +static char setup_token[] __initdata = + "dv:" + "width:" + "factor:" + ; /* DONNOT REMOVE THIS ';' */ + +#define OPT_DV 1 +#define OPT_MAX_WIDTH 2 +#define OPT_MIN_SYNC_FACTOR 3 + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +__init get_setup_token(char *p) +{ + char *cur = setup_token; + char *pc; + int i = 0; + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + ++pc; + ++i; + if (!strncmp(p, cur, pc - cur)) + return i; + cur = pc; + } + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +__init mptscsih_setup(char *str) +{ + char *cur = str; + char *pc, *pv; + unsigned long val; + int c; + + printk("KERN_WARNING: mptscsih_setup arg %s\n", str); + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + char *pe; + + val = 0; + pv = pc; + c = *++pv; + + if (c == 'n') + val = 0; + else if (c == 'y') + val = 1; + else + val = (int) simple_strtoul(pv, &pe, 0); + + printk("Found Token: %s, value %x\n", cur, (int)val); + switch (get_setup_token(cur)) { + case OPT_DV: + driver_setup.dv = val; + break; + + case OPT_MAX_WIDTH: + driver_setup.max_width = val; + break; + + case OPT_MIN_SYNC_FACTOR: + driver_setup.min_sync_fac = val; + break; + + default: + printk("mptscsih_setup: unexpected boot option '%.*s' ignored\n", 
(int)(pc-cur+1), cur); + break; + } + + if ((cur = strchr(cur, ARG_SEP)) != NULL) + ++cur; + } + return 1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h --- a/drivers/message/fusion/mptscsih.h Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/mptscsih.h Fri Apr 26 00:01:27 2002 @@ -15,11 +15,12 @@ * * (see also mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptscsih.h,v 1.7 2001/01/11 16:56:43 sralston Exp $ + * $Id: mptscsih.h,v 1.16 2002/02/27 18:44:30 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -68,14 +69,47 @@ * SCSI Public stuff... */ -#ifdef __sparc__ -#define MPT_SCSI_CAN_QUEUE 63 -#define MPT_SCSI_CMD_PER_LUN 63 - /* FIXME! Still investigating qd=64 hang on sparc64... */ -#else -#define MPT_SCSI_CAN_QUEUE 64 -#define MPT_SCSI_CMD_PER_LUN 64 -#endif +/* + * Try to keep these at 2^N-1 + */ +#define MPT_FC_CAN_QUEUE 63 +#define MPT_SCSI_CAN_QUEUE 31 +#define MPT_SCSI_CMD_PER_LUN 7 + +#define MPT_SCSI_SG_DEPTH 40 + +/* To disable domain validation, uncomment the + * following line. No effect for FC devices. + * For SCSI devices, driver will negotiate to + * NVRAM settings (if available) or to maximum adapter + * capabilities. + */ +/* #define MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + + +/* SCSI driver setup structure. Settings can be overridden + * by command line options. 
+ */ +#define MPTSCSIH_DOMAIN_VALIDATION 1 +#define MPTSCSIH_MAX_WIDTH 1 +#define MPTSCSIH_MIN_SYNC 0x08 + +struct mptscsih_driver_setup +{ + u8 dv; + u8 max_width; + u8 min_sync_fac; +}; + + +#define MPTSCSIH_DRIVER_SETUP \ +{ \ + MPTSCSIH_DOMAIN_VALIDATION, \ + MPTSCSIH_MAX_WIDTH, \ + MPTSCSIH_MIN_SYNC, \ +} + + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -143,6 +177,7 @@ #define x_scsi_dev_reset mptscsih_dev_reset #define x_scsi_host_reset mptscsih_host_reset #define x_scsi_bios_param mptscsih_bios_param +#define x_scsi_select_queue_depths mptscsih_select_queue_depths #define x_scsi_taskmgmt_bh mptscsih_taskmgmt_bh #define x_scsi_old_abort mptscsih_old_abort @@ -155,7 +190,6 @@ extern int x_scsi_detect(Scsi_Host_Template *); extern int x_scsi_release(struct Scsi_Host *host); extern const char *x_scsi_info(struct Scsi_Host *); -/*extern int x_scsi_command(Scsi_Cmnd *);*/ extern int x_scsi_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); #ifdef MPT_SCSI_USE_NEW_EH extern int x_scsi_abort(Scsi_Cmnd *); @@ -167,6 +201,7 @@ extern int x_scsi_old_reset(Scsi_Cmnd *, unsigned int); #endif extern int x_scsi_bios_param(Disk *, kdev_t, int *); +extern void x_scsi_select_queue_depths(struct Scsi_Host *, Scsi_Device *); extern void x_scsi_taskmgmt_bh(void *); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) @@ -194,10 +229,11 @@ bios_param: x_scsi_bios_param, \ can_queue: MPT_SCSI_CAN_QUEUE, \ this_id: -1, \ - sg_tablesize: 25, \ + sg_tablesize: MPT_SCSI_SG_DEPTH, \ cmd_per_lun: MPT_SCSI_CMD_PER_LUN, \ unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING, \ + use_new_eh_code: 1 \ } #else @@ -216,7 +252,7 @@ bios_param: x_scsi_bios_param, \ can_queue: MPT_SCSI_CAN_QUEUE, \ this_id: -1, \ - sg_tablesize: 25, \ + sg_tablesize: MPT_SCSI_SG_DEPTH, \ cmd_per_lun: MPT_SCSI_CMD_PER_LUN, \ unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING \ diff -Nru a/drivers/message/fusion/scsi3.h b/drivers/message/fusion/scsi3.h --- 
a/drivers/message/fusion/scsi3.h Fri Apr 26 00:01:27 2002 +++ b/drivers/message/fusion/scsi3.h Fri Apr 26 00:01:27 2002 @@ -4,11 +4,12 @@ * (Ultimately) SCSI-3 definitions; for now, inheriting * SCSI-2 definitions. * - * Copyright (c) 1996-2001 Steven J. Ralston + * Copyright (c) 1996-2002 Steven J. Ralston * Written By: Steven J. Ralston (19960517) - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: scsi3.h,v 1.5 2001/04/06 14:31:32 sralston Exp $ + * $Id: scsi3.h,v 1.9 2002/02/27 18:45:02 sralston Exp $ */ #ifndef SCSI3_H_INCLUDED @@ -63,7 +64,10 @@ #define CMD_Write10 0x2A #define CMD_WriteVerify 0x2E #define CMD_Verify 0x2F +#define CMD_SynchronizeCache 0x35 #define CMD_ReadDefectData 0x37 +#define CMD_WriteBuffer 0x3B +#define CMD_ReadBuffer 0x3C #define CMD_ReadLong 0x3E #define CMD_LogSelect 0x4C #define CMD_LogSense 0x4D diff -Nru a/drivers/net/eepro100.c b/drivers/net/eepro100.c --- a/drivers/net/eepro100.c Fri Apr 26 00:01:27 2002 +++ b/drivers/net/eepro100.c Fri Apr 26 00:01:27 2002 @@ -25,6 +25,8 @@ Disabled FC and ER, to avoid lockups when when we get FCP interrupts. 2000 Jul 17 Goutham Rao PCI DMA API fixes, adding pci_dma_sync_single calls where neccesary + 2000 Aug 31 David Mosberger + RX_ALIGN support: enables rx DMA without causing unaligned accesses. */ static const char *version = @@ -41,14 +43,18 @@ static int txdmacount = 128; static int rxdmacount /* = 0 */; +#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \ + defined(__arm__) + /* align rx buffers to 2 bytes so that IP header is aligned */ +# define RX_ALIGN +# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed)) +#else +# define RxFD_ALIGNMENT +#endif + /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method. Lower values use more memory, but are faster. 
*/ -#if defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \ - defined(__arm__) -static int rx_copybreak = 1518; -#else static int rx_copybreak = 200; -#endif /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 20; @@ -377,18 +383,18 @@ /* The Speedo3 Rx and Tx frame/buffer descriptors. */ struct descriptor { /* A generic descriptor. */ - s32 cmd_status; /* All command and status fields. */ + volatile s32 cmd_status; /* All command and status fields. */ u32 link; /* struct descriptor * */ unsigned char params[0]; }; /* The Speedo3 Rx and Tx buffer descriptors. */ struct RxFD { /* Receive frame descriptor. */ - s32 status; + volatile s32 status; u32 link; /* struct RxFD * */ u32 rx_buf_addr; /* void * */ u32 count; -}; +} RxFD_ALIGNMENT; /* Selected elements of the Tx/RxFD.status word. */ enum RxFD_bits { @@ -523,7 +529,9 @@ static int eepro100_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +#if defined(MODULE) || defined(CONFIG_HOTPLUG) static void eepro100_remove_one (struct pci_dev *pdev); +#endif #ifdef CONFIG_PM static int eepro100_suspend (struct pci_dev *pdev, u32 state); static int eepro100_resume (struct pci_dev *pdev); @@ -1233,6 +1241,9 @@ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); +#ifdef RX_ALIGN + skb_reserve(skb, 2); /* Align IP on 16 byte boundary */ +#endif sp->rx_skbuff[i] = skb; if (skb == NULL) break; /* OK. Just initially short of Rx bufs. */ @@ -1624,6 +1635,9 @@ struct sk_buff *skb; /* Get a fresh skbuff to replace the consumed one. 
*/ skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); +#ifdef RX_ALIGN + skb_reserve(skb, 2); /* Align IP on 16 byte boundary */ +#endif sp->rx_skbuff[entry] = skb; if (skb == NULL) { sp->rx_ringp[entry] = NULL; @@ -2307,7 +2321,9 @@ name: "eepro100", id_table: eepro100_pci_tbl, probe: eepro100_init_one, +# if defined(MODULE) || defined(CONFIG_HOTPLUG) remove: __devexit_p(eepro100_remove_one), +# endif #ifdef CONFIG_PM suspend: eepro100_suspend, resume: eepro100_resume, diff -Nru a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c --- a/drivers/scsi/scsi_ioctl.c Fri Apr 26 00:01:27 2002 +++ b/drivers/scsi/scsi_ioctl.c Fri Apr 26 00:01:27 2002 @@ -196,6 +196,9 @@ unsigned int needed, buf_needed; int timeout, retries, result; int data_direction, gfp_mask = GFP_KERNEL; +#if __GNUC__ < 3 + int foo; +#endif if (!sic) return -EINVAL; @@ -209,11 +212,21 @@ if (verify_area(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command))) return -EFAULT; +#if __GNUC__ < 3 + foo = __get_user(inlen, &sic->inlen); + if (foo) + return -EFAULT; + + foo = __get_user(outlen, &sic->outlen); + if (foo) + return -EFAULT; +#else if(__get_user(inlen, &sic->inlen)) return -EFAULT; if(__get_user(outlen, &sic->outlen)) return -EFAULT; +#endif /* * We do not transfer more than MAX_BUF with this interface. 
diff -Nru a/drivers/usb/host/usb-ohci.c b/drivers/usb/host/usb-ohci.c --- a/drivers/usb/host/usb-ohci.c Fri Apr 26 00:01:26 2002 +++ b/drivers/usb/host/usb-ohci.c Fri Apr 26 00:01:26 2002 @@ -2657,6 +2657,7 @@ } + #ifdef CONFIG_PM /*-------------------------------------------------------------------------*/ diff -Nru a/fs/binfmt_elf.c b/fs/binfmt_elf.c --- a/fs/binfmt_elf.c Fri Apr 26 00:01:27 2002 +++ b/fs/binfmt_elf.c Fri Apr 26 00:01:27 2002 @@ -567,10 +567,8 @@ // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n"); interpreter_type = INTERPRETER_ELF; } - } else { - /* Executables without an interpreter also need a personality */ - SET_PERSONALITY(elf_ex, ibcs2_interpreter); } + /* OK, we are done with that, now set up the arg stuff, and then start this sucker up */ diff -Nru a/fs/fcntl.c b/fs/fcntl.c --- a/fs/fcntl.c Fri Apr 26 00:01:26 2002 +++ b/fs/fcntl.c Fri Apr 26 00:01:26 2002 @@ -315,6 +315,7 @@ * to fix this will be in libc. */ err = filp->f_owner.pid; + force_successful_syscall_return(); break; case F_SETOWN: lock_kernel(); diff -Nru a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c --- a/fs/nfsd/nfsctl.c Fri Apr 26 00:01:26 2002 +++ b/fs/nfsd/nfsctl.c Fri Apr 26 00:01:26 2002 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff -Nru a/fs/proc/base.c b/fs/proc/base.c --- a/fs/proc/base.c Fri Apr 26 00:01:26 2002 +++ b/fs/proc/base.c Fri Apr 26 00:01:26 2002 @@ -513,7 +513,24 @@ } #endif +static loff_t mem_lseek(struct file * file, loff_t offset, int orig) +{ + switch (orig) { + case 0: + file->f_pos = offset; + break; + case 1: + file->f_pos += offset; + break; + default: + return -EINVAL; + } + force_successful_syscall_return(); + return file->f_pos; +} + static struct file_operations proc_mem_operations = { + llseek: mem_lseek, read: mem_read, write: mem_write, open: mem_open, diff -Nru a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h --- a/include/asm-i386/hw_irq.h Fri Apr 26 00:01:27 2002 +++ 
b/include/asm-i386/hw_irq.h Fri Apr 26 00:01:27 2002 @@ -106,4 +106,6 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif +extern irq_desc_t irq_desc [NR_IRQS]; + #endif /* _ASM_HW_IRQ_H */ diff -Nru a/include/asm-i386/page.h b/include/asm-i386/page.h --- a/include/asm-i386/page.h Fri Apr 26 00:01:26 2002 +++ b/include/asm-i386/page.h Fri Apr 26 00:01:26 2002 @@ -30,8 +30,8 @@ #endif -#define clear_user_page(page, vaddr) clear_page(page) -#define copy_user_page(to, from, vaddr) copy_page(to, from) +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) /* * These are used to make use of C type-checking.. diff -Nru a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h --- a/include/asm-i386/ptrace.h Fri Apr 26 00:01:26 2002 +++ b/include/asm-i386/ptrace.h Fri Apr 26 00:01:26 2002 @@ -58,6 +58,7 @@ #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) #define instruction_pointer(regs) ((regs)->eip) extern void show_regs(struct pt_regs *); +#define force_successful_syscall_return() do { } while (0) #endif #endif diff -Nru a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h --- a/include/asm-ia64/acpi.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/acpi.h Fri Apr 26 00:01:27 2002 @@ -32,6 +32,7 @@ #define __acpi_map_table(phys_addr, size) __va(phys_addr) +const char *acpi_get_sysname (void); int acpi_boot_init (char *cdline); int acpi_find_rsdp (unsigned long *phys_addr); int acpi_request_vector (u32 int_type); diff -Nru a/include/asm-ia64/errno.h b/include/asm-ia64/errno.h --- a/include/asm-ia64/errno.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/errno.h Fri Apr 26 00:01:27 2002 @@ -4,8 +4,8 @@ /* * This is derived from the Linux/x86 version. 
* - * Copyright (C) 1998, 1999 Hewlett-Packard Co - * Copyright (C) 1998, 1999 David Mosberger-Tang + * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co + * David Mosberger-Tang */ #define EPERM 1 /* Operation not permitted */ @@ -14,7 +14,7 @@ #define EINTR 4 /* Interrupted system call */ #define EIO 5 /* I/O error */ #define ENXIO 6 /* No such device or address */ -#define E2BIG 7 /* Arg list too long */ +#define E2BIG 7 /* Argument list too long */ #define ENOEXEC 8 /* Exec format error */ #define EBADF 9 /* Bad file number */ #define ECHILD 10 /* No child processes */ diff -Nru a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h --- a/include/asm-ia64/ioctls.h Fri Apr 26 00:01:26 2002 +++ b/include/asm-ia64/ioctls.h Fri Apr 26 00:01:26 2002 @@ -2,8 +2,8 @@ #define _ASM_IA64_IOCTLS_H /* - * Copyright (C) 1998, 1999 Hewlett-Packard Co - * Copyright (C) 1998, 1999 David Mosberger-Tang + * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co + * David Mosberger-Tang */ #include @@ -11,7 +11,7 @@ /* 0x54 is just a magic number to make these relatively unique ('T') */ #define TCGETS 0x5401 -#define TCSETS 0x5402 +#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ #define TCSETSW 0x5403 #define TCSETSF 0x5404 #define TCGETA 0x5405 diff -Nru a/include/asm-ia64/page.h b/include/asm-ia64/page.h --- a/include/asm-ia64/page.h Fri Apr 26 00:01:26 2002 +++ b/include/asm-ia64/page.h Fri Apr 26 00:01:26 2002 @@ -71,7 +71,7 @@ #ifdef CONFIG_IA64_GENERIC # include # define virt_to_page(kaddr) (mem_map + platform_map_nr(kaddr)) -# define page_to_phys(page) XXX fix me +# define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) #elif defined (CONFIG_IA64_SGI_SN1) # ifndef CONFIG_DISCONTIGMEM # define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr)) diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h --- a/include/asm-ia64/processor.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/processor.h Fri Apr 26 00:01:27 2002 @@ -154,6 +154,7 @@ 
#define IA64_ISR_SP_BIT 36 /* speculative load exception */ #define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */ #define IA64_ISR_IR_BIT 38 /* invalid register frame exception */ +#define IA64_ISR_CODE_MASK 0xf #define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT) #define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT) @@ -162,6 +163,14 @@ #define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT) #define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT) #define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT) + +/* ISR code field for non-access instructions */ +#define IA64_ISR_CODE_TPA 0 +#define IA64_ISR_CODE_FC 1 +#define IA64_ISR_CODE_PROBE 2 +#define IA64_ISR_CODE_TAK 3 +#define IA64_ISR_CODE_LFETCH 4 +#define IA64_ISR_CODE_PROBEF 5 #define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */ #define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */ diff -Nru a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h --- a/include/asm-ia64/siginfo.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/siginfo.h Fri Apr 26 00:01:27 2002 @@ -154,7 +154,8 @@ #define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */ #define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */ #define __ILL_BREAK (__SI_FAULT|10) /* illegal break */ -#define NSIGILL 10 +#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */ +#define NSIGILL 11 /* * SIGFPE si_codes diff -Nru a/include/asm-ia64/sn/sn2/shub_md.h b/include/asm-ia64/sn/sn2/shub_md.h --- a/include/asm-ia64/sn/sn2/shub_md.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/sn/sn2/shub_md.h Fri Apr 26 00:01:27 2002 @@ -1,14 +1,12 @@ -/************************************************************************** - * * - * Copyright (C) 2001 Silicon Graphics, Inc. All rights reserved. 
* - * * - * These coded instructions, statements, and computer programs contain * - * unpublished proprietary information of Silicon Graphics, Inc., and * - * are protected by Federal copyright law. They may not be disclosed * - * to third parties or copied or duplicated in any form, in whole or * - * in part, without the prior written consent of Silicon Graphics, Inc. * - * * - **************************************************************************/ +/* + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001, 2002 Silicon Graphics, Inc. All rights reserved. + */ + #ifndef _SHUB_MD_H #define _SHUB_MD_H diff -Nru a/include/asm-ia64/string.h b/include/asm-ia64/string.h --- a/include/asm-ia64/string.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/string.h Fri Apr 26 00:01:27 2002 @@ -18,20 +18,6 @@ extern __kernel_size_t strlen (const char *); extern void *memcpy (void *, const void *, __kernel_size_t); - -extern void *__memset_generic (void *, int, __kernel_size_t); -extern void __bzero (void *, __kernel_size_t); - -#define memset(s, c, count) \ -({ \ - void *_s = (s); \ - int _c = (c); \ - __kernel_size_t _count = (count); \ - \ - if (__builtin_constant_p(_c) && _c == 0) \ - __bzero(_s, _count); \ - else \ - __memset_generic(_s, _c, _count); \ -}) +extern void *memset (void *, int, __kernel_size_t); #endif /* _ASM_IA64_STRING_H */ diff -Nru a/include/asm-ia64/system.h b/include/asm-ia64/system.h --- a/include/asm-ia64/system.h Fri Apr 26 00:01:27 2002 +++ b/include/asm-ia64/system.h Fri Apr 26 00:01:27 2002 @@ -353,6 +353,9 @@ # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) #else # define IS_IA32_PROCESS(regs) 0 +struct task_struct; +static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){} +static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){} 
#endif /* diff -Nru a/include/linux/acpi.h b/include/linux/acpi.h --- a/include/linux/acpi.h Fri Apr 26 00:01:26 2002 +++ b/include/linux/acpi.h Fri Apr 26 00:01:26 2002 @@ -29,6 +29,8 @@ #define _LINUX #endif +#include + /* * YES this is ugly. * But, moving all of ACPI's private headers to include/acpi isn't the right diff -Nru a/include/linux/agp_backend.h b/include/linux/agp_backend.h --- a/include/linux/agp_backend.h Fri Apr 26 00:01:27 2002 +++ b/include/linux/agp_backend.h Fri Apr 26 00:01:27 2002 @@ -52,6 +52,7 @@ INTEL_I845, INTEL_I850, INTEL_I860, + INTEL_460GX, VIA_GENERIC, VIA_VP3, VIA_MVP3, @@ -74,7 +75,8 @@ ALI_GENERIC, SVWRKS_HE, SVWRKS_LE, - SVWRKS_GENERIC + SVWRKS_GENERIC, + HP_ZX1, }; typedef struct _agp_version { @@ -115,6 +117,7 @@ size_t page_count; int num_scratch_pages; unsigned long *memory; + void *vmptr; off_t pg_start; u32 type; u32 physical; diff -Nru a/include/linux/fs.h b/include/linux/fs.h --- a/include/linux/fs.h Fri Apr 26 00:01:27 2002 +++ b/include/linux/fs.h Fri Apr 26 00:01:27 2002 @@ -547,12 +547,17 @@ extern int init_private_file(struct file *, struct dentry *, int); +/* Max fileoffset that can safely be dealt with by filesystems that have not (yet) been + audited for 64-bit issues. */ #define MAX_NON_LFS ((1UL<<31) - 1) -/* Page cache limit. The filesystems should put that into their s_maxbytes - limits, otherwise bad things can happen in VM. */ +/* Max fileoffset that can be stored in a variable of type offset_t. */ +#define MAX_OFF_T ((loff_t)((1UL << ((sizeof(off_t)*8) - 1)) - 1)) + +/* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM. 
*/ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE 0x7fffffffffffffff #endif diff -Nru a/include/linux/highmem.h b/include/linux/highmem.h --- a/include/linux/highmem.h Fri Apr 26 00:01:26 2002 +++ b/include/linux/highmem.h Fri Apr 26 00:01:26 2002 @@ -87,7 +87,7 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_atomic(page, KM_USER0); - clear_user_page(addr, vaddr); + clear_user_page(addr, vaddr, page); kunmap_atomic(addr, KM_USER0); } @@ -121,7 +121,7 @@ vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); - copy_user_page(vto, vfrom, vaddr); + copy_user_page(vto, vfrom, vaddr, to); kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); } diff -Nru a/include/linux/irq.h b/include/linux/irq.h --- a/include/linux/irq.h Fri Apr 26 00:01:27 2002 +++ b/include/linux/irq.h Fri Apr 26 00:01:27 2002 @@ -56,15 +56,13 @@ * * Pad this out to 32 bytes for cache and indexing reasons. 
*/ -typedef struct { +typedef struct irq_desc { unsigned int status; /* IRQ status */ hw_irq_controller *handler; struct irqaction *action; /* IRQ action list */ unsigned int depth; /* nested irq disables */ spinlock_t lock; } ____cacheline_aligned irq_desc_t; - -extern irq_desc_t irq_desc [NR_IRQS]; #include /* the arch dependent stuff */ diff -Nru a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h --- a/include/linux/irq_cpustat.h Fri Apr 26 00:01:27 2002 +++ b/include/linux/irq_cpustat.h Fri Apr 26 00:01:27 2002 @@ -24,16 +24,32 @@ #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #else #define __IRQ_STAT(cpu, member) ((void)(cpu), irq_stat[0].member) -#endif +#endif #endif /* arch independent irq_stat fields */ #define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending) -#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count) -#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count) +#define irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count) +#define bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count) #define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count) #define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task) /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ + +#define local_hardirq_trylock() hardirq_trylock(smp_processor_id()) +#define local_hardirq_endlock() hardirq_trylock(smp_processor_id()) +#define local_irq_enter(irq) irq_enter(smp_processor_id(), (irq)) +#define local_irq_exit(irq) irq_exit(smp_processor_id(), (irq)) +#define local_softirq_pending() softirq_pending(smp_processor_id()) +#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id()) + +/* These will lose the "really_" prefix when the interim macros below are removed. */ +#define really_local_irq_count()bh_count(smp_processor_id()) +#define really_local_bh_count() bh_count(smp_processor_id()) + +/* Interim macros for backward compatibility. They are deprecated. 
Use irq_count() and + bh_count() instead. --davidm 01/11/28 */ +#define local_irq_count(cpu) irq_count(cpu) +#define local_bh_count(cpu) bh_count(cpu) #endif /* __irq_cpustat_h */ diff -Nru a/include/linux/kernel.h b/include/linux/kernel.h --- a/include/linux/kernel.h Fri Apr 26 00:01:26 2002 +++ b/include/linux/kernel.h Fri Apr 26 00:01:26 2002 @@ -37,6 +37,13 @@ #define KERN_INFO "<6>" /* informational */ #define KERN_DEBUG "<7>" /* debug-level messages */ +extern int console_printk[]; + +#define console_loglevel (console_printk[0]) +#define default_message_loglevel (console_printk[1]) +#define minimum_console_loglevel (console_printk[2]) +#define default_console_loglevel (console_printk[3]) + struct completion; extern struct notifier_block *panic_notifier_list; @@ -72,8 +79,6 @@ asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))); - -extern int console_loglevel; static inline void console_silent(void) { diff -Nru a/include/linux/percpu.h b/include/linux/percpu.h --- a/include/linux/percpu.h Fri Apr 26 00:01:27 2002 +++ b/include/linux/percpu.h Fri Apr 26 00:01:27 2002 @@ -2,11 +2,11 @@ #define __LINUX_PERCPU_H #include -#ifdef CONFIG_SMP #define __per_cpu_data __attribute__((section(".data.percpu"))) + +#ifdef CONFIG_SMP #include #else -#define __per_cpu_data #define per_cpu(var, cpu) var #define this_cpu(var) var #endif diff -Nru a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h Fri Apr 26 00:01:26 2002 +++ b/include/linux/sched.h Fri Apr 26 00:01:26 2002 @@ -407,14 +407,14 @@ #ifndef INIT_THREAD_SIZE # define INIT_THREAD_SIZE 2048*sizeof(long) -#endif - union thread_union { struct thread_info thread_info; unsigned long stack[INIT_THREAD_SIZE/sizeof(long)]; }; extern union thread_union init_thread_union; +#endif + extern struct task_struct init_task; extern struct mm_struct init_mm; diff -Nru a/include/linux/smp.h b/include/linux/smp.h --- a/include/linux/smp.h Fri Apr 26 00:01:27 2002 +++ 
b/include/linux/smp.h Fri Apr 26 00:01:27 2002 @@ -37,11 +37,6 @@ extern void smp_boot_cpus(void); /* - * Processor call in. Must hold processors until .. - */ -extern void smp_callin(void); - -/* * Multiprocessors may now schedule */ extern void smp_commence(void); @@ -55,14 +50,10 @@ /* * True once the per process idle is forked */ -extern int smp_threads_ready; +extern volatile int smp_threads_ready; extern int smp_num_cpus; -extern volatile unsigned long smp_msg_data; -extern volatile int smp_src_cpu; -extern volatile int smp_msg_id; - #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -92,9 +83,6 @@ #define cpu_online_map 1 static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule_all(void) { } -#define __per_cpu_data -#define per_cpu(var, cpu) var -#define this_cpu(var) var #endif #endif diff -Nru a/kernel/exec_domain.c b/kernel/exec_domain.c --- a/kernel/exec_domain.c Fri Apr 26 00:01:27 2002 +++ b/kernel/exec_domain.c Fri Apr 26 00:01:27 2002 @@ -196,8 +196,10 @@ put_exec_domain(oep); +#if 0 printk(KERN_DEBUG "[%s:%d]: set personality to %lx\n", current->comm, current->pid, personality); +#endif return 0; } diff -Nru a/kernel/fork.c b/kernel/fork.c --- a/kernel/fork.c Fri Apr 26 00:01:26 2002 +++ b/kernel/fork.c Fri Apr 26 00:01:26 2002 @@ -98,6 +98,13 @@ init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2; } +#if 1 + +extern struct task_struct *dup_task_struct(struct task_struct *orig); +extern void __put_task_struct(struct task_struct *tsk); + +#else + struct task_struct *dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; @@ -126,6 +133,8 @@ free_thread_info(tsk->thread_info); kmem_cache_free(task_struct_cachep,tsk); } + +#endif /* Protects next_safe and last_pid. 
*/ spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED; diff -Nru a/kernel/ksyms.c b/kernel/ksyms.c --- a/kernel/ksyms.c Fri Apr 26 00:01:26 2002 +++ b/kernel/ksyms.c Fri Apr 26 00:01:26 2002 @@ -384,7 +384,7 @@ EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); -#if !defined(CONFIG_ARCH_S390) +#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_IA64) EXPORT_SYMBOL(irq_stat); /* No separate irq_stat for s390, it is part of PSA */ #endif @@ -590,7 +590,9 @@ /* init task, for moving kthread roots - ought to export a function ?? */ EXPORT_SYMBOL(init_task); +#ifndef CONFIG_IA64 EXPORT_SYMBOL(init_thread_union); +#endif EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(pidhash); diff -Nru a/kernel/printk.c b/kernel/printk.c --- a/kernel/printk.c Fri Apr 26 00:01:27 2002 +++ b/kernel/printk.c Fri Apr 26 00:01:27 2002 @@ -16,6 +16,7 @@ * 01Mar01 Andrew Morton */ +#include #include #include #include @@ -52,11 +53,12 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait); -/* Keep together for sysctl support */ -int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL; -int default_message_loglevel = DEFAULT_MESSAGE_LOGLEVEL; -int minimum_console_loglevel = MINIMUM_CONSOLE_LOGLEVEL; -int default_console_loglevel = DEFAULT_CONSOLE_LOGLEVEL; +int console_printk[4] = { + DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ + DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ + MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ + DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ +}; int oops_in_progress; @@ -323,6 +325,12 @@ __call_console_drivers(start, end); } } +#ifdef CONFIG_IA64_EARLY_PRINTK + if (!console_drivers) { + static void early_printk (const char *str, size_t len); + early_printk(&LOG_BUF(start), end - start); + } +#endif } /* @@ -682,3 +690,50 @@ tty->driver.write(tty, 0, msg, strlen(msg)); return; } + +#ifdef CONFIG_IA64_EARLY_PRINTK + +#include + +#define VGABASE ((char *)0xc0000000000b8000) +#define VGALINES 24 +#define VGACOLS 80 + +static int current_ypos 
= VGALINES, current_xpos = 0; + +void +early_printk (const char *str, size_t len) +{ + char c; + int i, k, j; + + while (len-- > 0) { + c = *str++; + if (current_ypos >= VGALINES) { + /* scroll 1 line up */ + for (k = 1, j = 0; k < VGALINES; k++, j++) { + for (i = 0; i < VGACOLS; i++) { + writew(readw(VGABASE + 2*(VGACOLS*k + i)), + VGABASE + 2*(VGACOLS*j + i)); + } + } + for (i = 0; i < VGACOLS; i++) { + writew(0x720, VGABASE + 2*(VGACOLS*j + i)); + } + current_ypos = VGALINES-1; + } + if (c == '\n') { + current_xpos = 0; + current_ypos++; + } else if (c != '\r') { + writew(((0x7 << 8) | (unsigned short) c), + VGABASE + 2*(VGACOLS*current_ypos + current_xpos++)); + if (current_xpos >= VGACOLS) { + current_xpos = 0; + current_ypos++; + } + } + } +} + +#endif /* CONFIG_IA64_EARLY_PRINTK */ diff -Nru a/kernel/sched.c b/kernel/sched.c --- a/kernel/sched.c Fri Apr 26 00:01:27 2002 +++ b/kernel/sched.c Fri Apr 26 00:01:27 2002 @@ -672,7 +672,7 @@ task_t *p = current; if (p == rq->idle) { - if (local_bh_count(cpu) || local_irq_count(cpu) > 1) + if (really_local_bh_count() || really_local_irq_count() > 1) kstat.per_cpu_system[cpu] += system; #if CONFIG_SMP idle_tick(); diff -Nru a/kernel/softirq.c b/kernel/softirq.c --- a/kernel/softirq.c Fri Apr 26 00:01:27 2002 +++ b/kernel/softirq.c Fri Apr 26 00:01:27 2002 @@ -40,7 +40,10 @@ - Bottom halves: globally serialized, grr... 
*/ +/* No separate irq_stat for ia64, it is part of PSA */ +#if !defined(CONFIG_IA64) irq_cpustat_t irq_stat[NR_CPUS]; +#endif /* CONFIG_IA64 */ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; @@ -60,7 +63,6 @@ asmlinkage void do_softirq() { - int cpu = smp_processor_id(); __u32 pending; long flags; __u32 mask; @@ -70,7 +72,7 @@ local_irq_save(flags); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending) { struct softirq_action *h; @@ -79,7 +81,7 @@ local_bh_disable(); restart: /* Reset the pending bitmask before enabling irqs */ - softirq_pending(cpu) = 0; + local_softirq_pending() = 0; local_irq_enable(); @@ -94,7 +96,7 @@ local_irq_disable(); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending & mask) { mask &= ~pending; goto restart; @@ -102,7 +104,7 @@ __local_bh_enable(); if (pending) - wakeup_softirqd(cpu); + wakeup_softirqd(smp_processor_id()); } local_irq_restore(flags); @@ -124,7 +126,7 @@ * Otherwise we wake up ksoftirqd to make sure we * schedule the softirq soon. 
*/ - if (!(local_irq_count(cpu) | local_bh_count(cpu))) + if (!(irq_count(cpu) | bh_count(cpu))) wakeup_softirqd(cpu); } @@ -287,18 +289,16 @@ static void bh_action(unsigned long nr) { - int cpu = smp_processor_id(); - if (!spin_trylock(&global_bh_lock)) goto resched; - if (!hardirq_trylock(cpu)) + if (!local_hardirq_trylock()) goto resched_unlock; if (bh_base[nr]) bh_base[nr](); - hardirq_endlock(cpu); + local_hardirq_endlock(); spin_unlock(&global_bh_lock); return; @@ -377,15 +377,15 @@ __set_current_state(TASK_INTERRUPTIBLE); mb(); - ksoftirqd_task(cpu) = current; + local_ksoftirqd_task() = current; for (;;) { - if (!softirq_pending(cpu)) + if (!local_softirq_pending()) schedule(); __set_current_state(TASK_RUNNING); - while (softirq_pending(cpu)) { + while (local_softirq_pending()) { do_softirq(); cond_resched(); } diff -Nru a/kernel/time.c b/kernel/time.c --- a/kernel/time.c Fri Apr 26 00:01:26 2002 +++ b/kernel/time.c Fri Apr 26 00:01:26 2002 @@ -39,6 +39,7 @@ /* The xtime_lock is not only serializing the xtime read/writes but it's also serializing all accesses to the global NTP variables now. 
*/ extern rwlock_t xtime_lock; +extern unsigned long last_time_offset; #if !defined(__alpha__) && !defined(__ia64__) @@ -82,6 +83,7 @@ write_lock_irq(&xtime_lock); xtime.tv_sec = value; xtime.tv_usec = 0; + last_time_offset = 0; time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; @@ -127,6 +129,7 @@ { write_lock_irq(&xtime_lock); xtime.tv_sec += sys_tz.tz_minuteswest * 60; + last_time_offset = 0; write_unlock_irq(&xtime_lock); } @@ -386,6 +389,7 @@ txc->calcnt = pps_calcnt; txc->errcnt = pps_errcnt; txc->stbcnt = pps_stbcnt; + last_time_offset = 0; write_unlock_irq(&xtime_lock); do_gettimeofday(&txc->time); return(result); diff -Nru a/kernel/timer.c b/kernel/timer.c --- a/kernel/timer.c Fri Apr 26 00:01:27 2002 +++ b/kernel/timer.c Fri Apr 26 00:01:27 2002 @@ -635,6 +635,7 @@ * This spinlock protect us from races in SMP while playing with xtime. -arca */ rwlock_t xtime_lock = RW_LOCK_UNLOCKED; +unsigned long last_time_offset; static inline void update_times(void) { @@ -652,6 +653,7 @@ wall_jiffies += ticks; update_wall_time(ticks); } + last_time_offset = 0; write_unlock_irq(&xtime_lock); calc_load(ticks); } @@ -664,7 +666,7 @@ void do_timer(struct pt_regs *regs) { - (*(unsigned long *)&jiffies)++; + (*(volatile unsigned long *)&jiffies)++; #ifndef CONFIG_SMP /* SMP process accounting uses the local APIC timer */ @@ -885,7 +887,7 @@ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0) return -EINVAL; - +#if !defined(__ia64__) if (t.tv_sec == 0 && t.tv_nsec <= 2000000L && current->policy != SCHED_OTHER) { @@ -898,6 +900,7 @@ udelay((t.tv_nsec + 999) / 1000); return 0; } +#endif expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); diff -Nru a/lib/brlock.c b/lib/brlock.c --- a/lib/brlock.c Fri Apr 26 00:01:26 2002 +++ b/lib/brlock.c Fri Apr 26 00:01:26 2002 @@ -18,7 +18,7 @@ #ifdef __BRLOCK_USE_ATOMICS brlock_read_lock_t __brlock_array[NR_CPUS][__BR_IDX_MAX] = - { [0 ... NR_CPUS-1] = { [0 ... 
__BR_IDX_MAX-1] = RW_LOCK_UNLOCKED } }; + { [0 ... NR_CPUS-1] = { [0 ... __BR_IDX_MAX-1] = {0, 0} } }; void __br_write_lock (enum brlock_indices idx) { diff -Nru a/mm/filemap.c b/mm/filemap.c --- a/mm/filemap.c Fri Apr 26 00:01:27 2002 +++ b/mm/filemap.c Fri Apr 26 00:01:27 2002 @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -1356,6 +1357,9 @@ return written; } +#ifndef __NR_sendfile64 +inline +#endif static ssize_t common_sendfile(int out_fd, int in_fd, loff_t *offset, size_t count, loff_t max) { ssize_t retval; @@ -1450,12 +1454,13 @@ pos = off; ppos = &pos; } - ret = common_sendfile(out_fd, in_fd, ppos, count, MAX_NON_LFS); + ret = common_sendfile(out_fd, in_fd, ppos, count, MAX_OFF_T); if (offset && put_user(pos, offset)) ret = -EFAULT; return ret; } +#ifdef __NR_sendfile64 asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count) { loff_t pos, *ppos = NULL; @@ -1470,6 +1475,7 @@ ret = -EFAULT; return ret; } +#endif static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr) { diff -Nru a/mm/memory.c b/mm/memory.c --- a/mm/memory.c Fri Apr 26 00:01:27 2002 +++ b/mm/memory.c Fri Apr 26 00:01:27 2002 @@ -120,7 +120,7 @@ pmd = pmd_offset(dir, 0); pgd_clear(dir); for (j = 0; j < PTRS_PER_PMD ; j++) { - prefetchw(pmd+j+(PREFETCH_STRIDE/16)); + prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)); free_one_pmd(pmd+j); } pmd_free(pmd); diff -Nru a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c --- a/sound/oss/cs4281/cs4281m.c Fri Apr 26 00:01:27 2002 +++ b/sound/oss/cs4281/cs4281m.c Fri Apr 26 00:01:27 2002 @@ -1943,8 +1943,8 @@ len -= x; } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO - "cs4281: clear_advance(): memset %d at 0x%.8x for %d size \n", - (unsigned)c, (unsigned)((char *) buf) + bptr, len)); + "cs4281: clear_advance(): memset %d at %p for %d size \n", + (unsigned)c, ((char *) buf) + bptr, len)); memset(((char *) buf) + bptr, c, len); } @@ -1979,9 +1979,8 @@ 
wake_up(&s->dma_adc.wait); } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned)s, s->dma_adc.hwptr, - s->dma_adc.total_bytes, s->dma_adc.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count)); } // update DAC pointer // @@ -2013,11 +2012,10 @@ // Continue to play silence until the _release. // CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): memset %d at 0x%.8x for %d size \n", + "cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n", (unsigned)(s->prop_dac.fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0, - (unsigned)s->dma_dac.rawbuf, - s->dma_dac.dmasize)); + s->dma_dac.rawbuf, s->dma_dac.dmasize)); memset(s->dma_dac.rawbuf, (s->prop_dac. fmt & (AFMT_U8 | AFMT_U16_LE)) ? @@ -2048,9 +2046,8 @@ } } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned) s, s->dma_dac.hwptr, - s->dma_dac.total_bytes, s->dma_dac.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count)); } } @@ -2181,8 +2178,7 @@ VALIDATE_STATE(s); CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n", - (unsigned) s, cmd)); + "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -2747,9 +2743,8 @@ CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4281: CopySamples()+ ")); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " dst=0x%x src=0x%x count=%d iChannels=%d fmt=0x%x\n", - (unsigned) dst, (unsigned) src, (unsigned) count, - (unsigned) iChannels, (unsigned) fmt)); + " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n", + dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt)); // Gershwin does format conversion in hardware so normally // we don't do any 
host based coversion. The data formatter @@ -2829,9 +2824,9 @@ void *src = hwsrc; //default to the standard destination buffer addr CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO - "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=0x%.8x\n", + "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n", s->prop_adc.fmt, s->prop_adc.fmt_original, - (unsigned) cnt, (unsigned) dest)); + (unsigned) cnt, dest)); if (cnt > s->dma_adc.dmasize) { cnt = s->dma_adc.dmasize; @@ -2876,7 +2871,7 @@ unsigned copied = 0; CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()+ %d \n", count)); + printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count)); VALIDATE_STATE(s); if (ppos != &file->f_pos) @@ -2899,7 +2894,7 @@ // while (count > 0) { CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n", + "_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n", count, s->dma_adc.count, s->dma_adc.swptr, s->dma_adc.hwptr)); spin_lock_irqsave(&s->lock, flags); @@ -2956,11 +2951,10 @@ // the "cnt" is the number of bytes to read. 
CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO - "_read() copy_to cnt=%d count=%d ", cnt, count)); + "_read() copy_to cnt=%d count=%Zu ", cnt, count)); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " .dmasize=%d .count=%d buffer=0x%.8x ret=%d\n", - s->dma_adc.dmasize, s->dma_adc.count, - (unsigned) buffer, ret)); + " .dmasize=%d .count=%d buffer=%p ret=%Zd\n", + s->dma_adc.dmasize, s->dma_adc.count, buffer, ret)); if (cs_copy_to_user (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied)) @@ -2976,7 +2970,7 @@ start_adc(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret)); return ret; } @@ -2992,7 +2986,7 @@ int cnt; CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()+ count=%d\n", + printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n", count)); VALIDATE_STATE(s); @@ -3048,7 +3042,7 @@ start_dac(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret)); return ret; } @@ -3169,8 +3163,7 @@ int val, mapped, ret; CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: cs4281_ioctl(): file=0x%.8x cmd=0x%.8x\n", - (unsigned) file, cmd)); + "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -3596,8 +3589,8 @@ (struct cs4281_state *) file->private_data; CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO - "cs4281: cs4281_release(): inode=0x%.8x file=0x%.8x f_mode=%d\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n", + inode, file, file->f_mode)); VALIDATE_STATE(s); @@ -3631,8 +3624,8 @@ struct list_head *entry; CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO - "cs4281: cs4281_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_open(): 
inode=%p file=%p f_mode=0x%x\n", + inode, file, file->f_mode)); list_for_each(entry, &cs4281_devs) { @@ -4341,10 +4334,8 @@ CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO - "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=0x%.8x pBA1=0x%.8x \n", - (unsigned) temp1, (unsigned) temp2, - (unsigned) s->pBA0, (unsigned) s->pBA1)); - + "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n", + (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1)); CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n", @@ -4391,15 +4382,13 @@ if (pmdev) { CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs4281: probe() pm_register() succeeded (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() succeeded (%p).\n", pmdev)); pmdev->data = s; } else { CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO - "cs4281: probe() pm_register() failed (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() failed (%p).\n", pmdev)); s->pm.flags |= CS4281_PM_NOT_REGISTERED; } #endif diff -Nru a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c --- a/sound/oss/cs4281/cs4281pm-24.c Fri Apr 26 00:01:27 2002 +++ b/sound/oss/cs4281/cs4281pm-24.c Fri Apr 26 00:01:27 2002 @@ -38,16 +38,16 @@ #define CS4281_SUSPEND_TBL cs4281_suspend_tbl #define CS4281_RESUME_TBL cs4281_resume_tbl */ -#define CS4281_SUSPEND_TBL cs4281_null -#define CS4281_RESUME_TBL cs4281_null +#define CS4281_SUSPEND_TBL (int (*) (struct pci_dev *, u32)) cs4281_null +#define CS4281_RESUME_TBL (int (*) (struct pci_dev *)) cs4281_null int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) { struct cs4281_state *state; CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: cs4281_pm_callback dev=0x%x rqst=0x%x state=%d\n", - (unsigned)dev,(unsigned)rqst,(unsigned)data)); + "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n", + dev,(unsigned)rqst,data)); state = (struct cs4281_state *) dev->data; if (state) { switch(rqst) { @@ -78,7 +78,7 @@ } #else /* 
CS4281_PM */ -#define CS4281_SUSPEND_TBL cs4281_null -#define CS4281_RESUME_TBL cs4281_null +#define CS4281_SUSPEND_TBL (int (*) (struct pci_dev *, u32)) cs4281_null +#define CS4281_RESUME_TBL (int (*) (struct pci_dev *)) cs4281_null #endif /* CS4281_PM */