diff -Nru a/Documentation/mmio_barrier.txt b/Documentation/mmio_barrier.txt --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/Documentation/mmio_barrier.txt Thu May 30 21:28:59 2002 @@ -0,0 +1,15 @@ +On some platforms, so-called memory-mapped I/O is weakly ordered. For +example, the following might occur: + +CPU A writes 0x1 to Device #1 +CPU B writes 0x2 to Device #1 +Device #1 sees 0x2 +Device #1 sees 0x1 + +On such platforms, driver writers are responsible for ensuring that I/O +writes to memory-mapped addresses on their device arrive in the order +intended. The mmiob() macro is provided for this purpose. A typical use +of this macro might be immediately prior to the exit of a critical +section of code protected by spinlocks. This would ensure that subsequent +writes to I/O space arrived only after all prior writes (much like a +typical memory barrier op, mb(), only with respect to I/O). diff -Nru a/Makefile b/Makefile --- a/Makefile Thu May 30 21:28:58 2002 +++ b/Makefile Thu May 30 21:28:58 2002 @@ -95,7 +95,7 @@ CPPFLAGS := -D__KERNEL__ -I$(HPATH) -CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ +CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \ -fomit-frame-pointer -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS) diff -Nru a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c --- a/arch/i386/mm/fault.c Thu May 30 21:28:58 2002 +++ b/arch/i386/mm/fault.c Thu May 30 21:28:58 2002 @@ -27,8 +27,6 @@ extern void die(const char *,struct pt_regs *,long); -extern int console_loglevel; - /* * Ugly, ugly, but the goto's result in better assembly.. */ diff -Nru a/arch/ia64/Config.help b/arch/ia64/Config.help --- a/arch/ia64/Config.help Thu May 30 21:28:59 2002 +++ b/arch/ia64/Config.help Thu May 30 21:28:59 2002 @@ -400,7 +400,7 @@ Select your IA64 processor type. The default is Intel Itanium. CONFIG_MCKINLEY - Select this to configure for a McKinley processor. 
+ Select this to configure for an Itanium 2 (McKinley) processor. CONFIG_IA64_GENERIC This selects the system type of your hardware. A "generic" kernel diff -Nru a/arch/ia64/Makefile b/arch/ia64/Makefile --- a/arch/ia64/Makefile Thu May 30 21:28:58 2002 +++ b/arch/ia64/Makefile Thu May 30 21:28:58 2002 @@ -69,13 +69,6 @@ $(CORE_FILES) endif -ifdef CONFIG_IA64_SOFTSDV - SUBDIRS := arch/$(ARCH)/dig \ - $(SUBDIRS) - CORE_FILES := arch/$(ARCH)/dig/dig.a \ - $(CORE_FILES) -endif - ifdef CONFIG_IA64_DIG SUBDIRS := arch/$(ARCH)/dig \ $(SUBDIRS) diff -Nru a/arch/ia64/config.in b/arch/ia64/config.in --- a/arch/ia64/config.in Thu May 30 21:28:59 2002 +++ b/arch/ia64/config.in Thu May 30 21:28:59 2002 @@ -16,7 +16,7 @@ choice 'IA-64 processor type' \ "Itanium CONFIG_ITANIUM \ - McKinley CONFIG_MCKINLEY" Itanium + Itanium-2 CONFIG_MCKINLEY" Itanium choice 'IA-64 system type' \ "generic CONFIG_IA64_GENERIC \ @@ -26,11 +26,18 @@ SGI-SN1 CONFIG_IA64_SGI_SN1 \ SGI-SN2 CONFIG_IA64_SGI_SN2" generic -choice 'Kernel page size' \ +if [ "$CONFIG_ITANIUM" = "y" ]; then + choice 'Kernel page size' \ + "4KB CONFIG_IA64_PAGE_SIZE_4KB \ + 8KB CONFIG_IA64_PAGE_SIZE_8KB \ + 16KB CONFIG_IA64_PAGE_SIZE_16KB" 16KB +else + choice 'Kernel page size' \ "4KB CONFIG_IA64_PAGE_SIZE_4KB \ 8KB CONFIG_IA64_PAGE_SIZE_8KB \ 16KB CONFIG_IA64_PAGE_SIZE_16KB \ 64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB +fi if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then define_bool CONFIG_ACPI y diff -Nru a/arch/ia64/hp/common/Makefile b/arch/ia64/hp/common/Makefile --- a/arch/ia64/hp/common/Makefile Thu May 30 21:28:59 2002 +++ b/arch/ia64/hp/common/Makefile Thu May 30 21:28:59 2002 @@ -12,17 +12,3 @@ obj-y := sba_iommu.o include $(TOPDIR)/Rules.make -# -# ia64/platform/hp/common/Makefile -# -# Copyright (C) 2002 Hewlett Packard -# Copyright (C) Alex Williamson (alex_williamson@hp.com) -# - -O_TARGET := common.o - -export-objs := sba_iommu.o - -obj-y := sba_iommu.o - -include $(TOPDIR)/Rules.make diff -Nru 
a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c --- a/arch/ia64/hp/common/sba_iommu.c Thu May 30 21:28:58 2002 +++ b/arch/ia64/hp/common/sba_iommu.c Thu May 30 21:28:58 2002 @@ -1389,6 +1389,12 @@ return ((unsigned long)sba_sg_iova(sg)); } +int +sba_dma_supported (struct pci_dev *dev, u64 mask) +{ + return 1; +} + /************************************************************** * * Initialization and claim @@ -1858,5 +1864,6 @@ EXPORT_SYMBOL(sba_map_sg); EXPORT_SYMBOL(sba_unmap_sg); EXPORT_SYMBOL(sba_dma_address); +EXPORT_SYMBOL(sba_dma_supported); EXPORT_SYMBOL(sba_alloc_consistent); EXPORT_SYMBOL(sba_free_consistent); diff -Nru a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c --- a/arch/ia64/hp/sim/simserial.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/hp/sim/simserial.c Thu May 30 21:28:59 2002 @@ -7,9 +7,9 @@ * case means sys_sim.c console (goes via the simulator). The code hereafter * is completely leveraged from the serial.c driver. * - * Copyright (C) 1999-2000 Hewlett-Packard Co - * Copyright (C) 1999 Stephane Eranian - * Copyright (C) 2000 David Mosberger-Tang + * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co + * Stephane Eranian + * David Mosberger-Tang * * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. 
@@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff -Nru a/arch/ia64/hp/zx1/Makefile b/arch/ia64/hp/zx1/Makefile --- a/arch/ia64/hp/zx1/Makefile Thu May 30 21:28:59 2002 +++ b/arch/ia64/hp/zx1/Makefile Thu May 30 21:28:59 2002 @@ -11,16 +11,3 @@ obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o include $(TOPDIR)/Rules.make -# -# ia64/platform/hp/zx1/Makefile -# -# Copyright (C) 2002 Hewlett Packard -# Copyright (C) Alex Williamson (alex_williamson@hp.com) -# - -O_TARGET := zx1.o - -obj-y := hpzx1_misc.o -obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o - -include $(TOPDIR)/Rules.make diff -Nru a/arch/ia64/hp/zx1/hpzx1_misc.c b/arch/ia64/hp/zx1/hpzx1_misc.c --- a/arch/ia64/hp/zx1/hpzx1_misc.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/hp/zx1/hpzx1_misc.c Thu May 30 21:28:59 2002 @@ -198,7 +198,6 @@ extern acpi_resource *acpi_get_crs_next(acpi_buffer *, int *); extern acpi_resource_data *acpi_get_crs_type(acpi_buffer *, int *, int); extern void acpi_dispose_crs(acpi_buffer *); -extern acpi_status acpi_cf_evaluate_method(acpi_handle, UINT8 *, NATIVE_UINT *); static acpi_status hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length) @@ -388,407 +387,7 @@ } extern void sba_init(void); - -void -hpzx1_pci_fixup (int phase) -{ - if (phase == 0) - hpzx1_acpi_dev_init(); - iosapic_pci_fixup(phase); - if (phase == 1) - sba_init(); -} -/* - * Misc. 
support for HP zx1 chipset support - * - * Copyright (C) 2002 Hewlett-Packard Co - * Copyright (C) 2002 Alex Williamson - * Copyright (C) 2002 Bjorn Helgaas - */ - - -#include -#include -#include -#include -#include -#include -#include - -#include "../drivers/acpi/include/platform/acgcc.h" -#include "../drivers/acpi/include/actypes.h" -#include "../drivers/acpi/include/acexcep.h" -#include "../drivers/acpi/include/acpixf.h" -#include "../drivers/acpi/include/actbl.h" -#include "../drivers/acpi/include/acconfig.h" -#include "../drivers/acpi/include/acmacros.h" -#include "../drivers/acpi/include/aclocal.h" -#include "../drivers/acpi/include/acobject.h" -#include "../drivers/acpi/include/acstruct.h" -#include "../drivers/acpi/include/acnamesp.h" -#include "../drivers/acpi/include/acutils.h" - -#define PFX "hpzx1: " - -struct fake_pci_dev { - struct fake_pci_dev *next; - unsigned char bus; - unsigned int devfn; - int sizing; // in middle of BAR sizing operation? - unsigned long csr_base; - unsigned int csr_size; - unsigned long mapped_csrs; // ioremapped -}; - -static struct fake_pci_dev *fake_pci_head, **fake_pci_tail = &fake_pci_head; - -static struct pci_ops orig_pci_ops; - -static inline struct fake_pci_dev * -fake_pci_find_slot(unsigned char bus, unsigned int devfn) -{ - struct fake_pci_dev *dev; - - for (dev = fake_pci_head; dev; dev = dev->next) - if (dev->bus == bus && dev->devfn == devfn) - return dev; - return NULL; -} - -static struct fake_pci_dev * -alloc_fake_pci_dev(void) -{ - struct fake_pci_dev *dev; - - dev = kmalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; - - memset(dev, 0, sizeof(*dev)); - - *fake_pci_tail = dev; - fake_pci_tail = &dev->next; - - return dev; -} - -#define HP_CFG_RD(sz, bits, name) \ -static int hp_cfg_read##sz (struct pci_dev *dev, int where, u##bits *value) \ -{ \ - struct fake_pci_dev *fake_dev; \ - if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \ - return orig_pci_ops.name(dev, where, value); \ - 
\ - switch (where) { \ - case PCI_COMMAND: \ - *value = read##sz(fake_dev->mapped_csrs + where); \ - *value |= PCI_COMMAND_MEMORY; /* SBA omits this */ \ - break; \ - case PCI_BASE_ADDRESS_0: \ - if (fake_dev->sizing) \ - *value = ~(fake_dev->csr_size - 1); \ - else \ - *value = (fake_dev->csr_base & \ - PCI_BASE_ADDRESS_MEM_MASK) | \ - PCI_BASE_ADDRESS_SPACE_MEMORY; \ - fake_dev->sizing = 0; \ - break; \ - default: \ - *value = read##sz(fake_dev->mapped_csrs + where); \ - break; \ - } \ - return PCIBIOS_SUCCESSFUL; \ -} - -#define HP_CFG_WR(sz, bits, name) \ -static int hp_cfg_write##sz (struct pci_dev *dev, int where, u##bits value) \ -{ \ - struct fake_pci_dev *fake_dev; \ - if (!(fake_dev = fake_pci_find_slot(dev->bus->number, dev->devfn))) \ - return orig_pci_ops.name(dev, where, value); \ - \ - switch (where) { \ - case PCI_BASE_ADDRESS_0: \ - if (value == ~0) \ - fake_dev->sizing = 1; \ - break; \ - default: \ - write##sz(value, fake_dev->mapped_csrs + where); \ - break; \ - } \ - return PCIBIOS_SUCCESSFUL; \ -} - -HP_CFG_RD(b, 8, read_byte) -HP_CFG_RD(w, 16, read_word) -HP_CFG_RD(l, 32, read_dword) -HP_CFG_WR(b, 8, write_byte) -HP_CFG_WR(w, 16, write_word) -HP_CFG_WR(l, 32, write_dword) - -static struct pci_ops hp_pci_conf = { - hp_cfg_readb, - hp_cfg_readw, - hp_cfg_readl, - hp_cfg_writeb, - hp_cfg_writew, - hp_cfg_writel, -}; - -/* - * Assume we'll never have a physical slot higher than 0x10, so we can - * use slots above that for "fake" PCI devices to represent things - * that only show up in the ACPI namespace. 
- */ -#define HP_MAX_SLOT 0x10 - -static struct fake_pci_dev * -hpzx1_fake_pci_dev(unsigned long addr, unsigned int bus, unsigned int size) -{ - struct fake_pci_dev *dev; - int slot; - - // Note: lspci thinks 0x1f is invalid - for (slot = 0x1e; slot > HP_MAX_SLOT; slot--) { - if (!fake_pci_find_slot(bus, PCI_DEVFN(slot, 0))) - break; - } - if (slot == HP_MAX_SLOT) { - printk(KERN_ERR PFX - "no slot space for device (0x%p) on bus 0x%02x\n", - (void *) addr, bus); - return NULL; - } - - dev = alloc_fake_pci_dev(); - if (!dev) { - printk(KERN_ERR PFX - "no memory for device (0x%p) on bus 0x%02x\n", - (void *) addr, bus); - return NULL; - } - - dev->bus = bus; - dev->devfn = PCI_DEVFN(slot, 0); - dev->csr_base = addr; - dev->csr_size = size; - - /* - * Drivers should ioremap what they need, but we have to do - * it here, too, so PCI config accesses work. - */ - dev->mapped_csrs = (unsigned long) ioremap(dev->csr_base, dev->csr_size); - - return dev; -} - -typedef struct { - u8 guid_id; - u8 guid[16]; - u8 csr_base[8]; - u8 csr_length[8]; -} acpi_hp_vendor_long; - -#define HP_CCSR_LENGTH 0x21 -#define HP_CCSR_TYPE 0x2 -#define HP_CCSR_GUID \ - ((efi_guid_t) { 0x69e9adf9, 0x924f, 0xab5f, { 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad }}) - -extern acpi_status acpi_get_crs(acpi_handle, acpi_buffer *); -extern acpi_resource *acpi_get_crs_next(acpi_buffer *, int *); -extern acpi_resource_data *acpi_get_crs_type(acpi_buffer *, int *, int); -extern void acpi_dispose_crs(acpi_buffer *); -extern acpi_status acpi_cf_evaluate_method(acpi_handle, UINT8 *, NATIVE_UINT *); - -static acpi_status -hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length) -{ - int i, offset = 0; - acpi_status status; - acpi_buffer buf; - acpi_resource_vendor *res; - acpi_hp_vendor_long *hp_res; - efi_guid_t vendor_guid; - - *csr_base = 0; - *csr_length = 0; - - status = acpi_get_crs(obj, &buf); - if (status != AE_OK) { - printk(KERN_ERR PFX "Unable to get _CRS data on object\n"); - return status; 
- } - - res = (acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR); - if (!res) { - printk(KERN_ERR PFX "Failed to find config space for device\n"); - acpi_dispose_crs(&buf); - return AE_NOT_FOUND; - } - - hp_res = (acpi_hp_vendor_long *)(res->reserved); - - if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) { - printk(KERN_ERR PFX "Unknown Vendor data\n"); - acpi_dispose_crs(&buf); - return AE_TYPE; /* Revisit error? */ - } - - memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t)); - if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) { - printk(KERN_ERR PFX "Vendor GUID does not match\n"); - acpi_dispose_crs(&buf); - return AE_TYPE; /* Revisit error? */ - } - - for (i = 0 ; i < 8 ; i++) { - *csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8)); - *csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8)); - } - - acpi_dispose_crs(&buf); - - return AE_OK; -} - -static acpi_status -hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret) -{ - u64 csr_base = 0, csr_length = 0; - char *name = context; - struct fake_pci_dev *dev; - acpi_status status; - - status = hp_csr_space(obj, &csr_base, &csr_length); - - if (status != AE_OK) - return status; - - /* - * Only SBA shows up in ACPI namespace, so its CSR space - * includes both SBA and IOC. Make SBA and IOC show up - * separately in PCI space. 
- */ - if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000))) - printk(KERN_INFO PFX "%s SBA at 0x%lx; pci dev %02x:%02x.%d\n", - name, csr_base, dev->bus, - PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); - if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0, 0x1000))) - printk(KERN_INFO PFX "%s IOC at 0x%lx; pci dev %02x:%02x.%d\n", - name, csr_base + 0x1000, dev->bus, - PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); - - return AE_OK; -} - -static acpi_status -hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret) -{ - acpi_status status; - u64 csr_base = 0, csr_length = 0; - char *name = context; - NATIVE_UINT busnum = 0; - struct fake_pci_dev *dev; - - status = hp_csr_space(obj, &csr_base, &csr_length); - - if (status != AE_OK) - return status; - - status = acpi_cf_evaluate_method(obj, METHOD_NAME__BBN, &busnum); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PFX "evaluate _BBN fail=0x%x\n", status); - busnum = 0; // no _BBN; stick it on bus 0 - } - - if ((dev = hpzx1_fake_pci_dev(csr_base, busnum, csr_length))) - printk(KERN_INFO PFX "%s LBA at 0x%lx, _BBN 0x%02x; " - "pci dev %02x:%02x.%d\n", - name, csr_base, busnum, dev->bus, - PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); - - return AE_OK; -} - -static void -hpzx1_acpi_dev_init(void) -{ - extern struct pci_ops pci_conf; - - /* - * Make fake PCI devices for the following hardware in the - * ACPI namespace. This makes it more convenient for drivers - * because they can claim these devices based on PCI - * information, rather than needing to know about ACPI. The - * 64-bit "HPA" space for this hardware is available as BAR - * 0/1. 
- * - * HWP0001: Single IOC SBA w/o IOC in namespace - * HWP0002: LBA device - * HWP0003: AGP LBA device - */ - acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL); -#ifdef CONFIG_IA64_HP_PROTO - if (fake_pci_tail != &fake_pci_head) { -#endif - acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002", NULL); - acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003", NULL); - -#ifdef CONFIG_IA64_HP_PROTO - } - -#define ZX1_FUNC_ID_VALUE (PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP - /* - * Early protos don't have bridges in the ACPI namespace, so - * if we didn't find anything, add the things we know are - * there. - */ - if (fake_pci_tail == &fake_pci_head) { - u64 hpa, csr_base; - struct fake_pci_dev *dev; - csr_base = 0xfed00000UL; - hpa = (u64) ioremap(csr_base, 0x1000); - if (__raw_readl(hpa) == ZX1_FUNC_ID_VALUE) { - if ((dev = hpzx1_fake_pci_dev(csr_base, 0, 0x1000))) - printk(KERN_INFO PFX "HWP0001 SBA at 0x%lx; " - "pci dev %02x:%02x.%d\n", csr_base, - dev->bus, PCI_SLOT(dev->devfn), - PCI_FUNC(dev->devfn)); - if ((dev = hpzx1_fake_pci_dev(csr_base + 0x1000, 0, - 0x1000))) - printk(KERN_INFO PFX "HWP0001 IOC at 0x%lx; " - "pci dev %02x:%02x.%d\n", - csr_base + 0x1000, - dev->bus, PCI_SLOT(dev->devfn), - PCI_FUNC(dev->devfn)); - - csr_base = 0xfed24000UL; - iounmap(hpa); - hpa = (u64) ioremap(csr_base, 0x1000); - if ((dev = hpzx1_fake_pci_dev(csr_base, 0x40, 0x1000))) - printk(KERN_INFO PFX "HWP0003 AGP LBA at " - "0x%lx; pci dev %02x:%02x.%d\n", - csr_base, - dev->bus, PCI_SLOT(dev->devfn), - PCI_FUNC(dev->devfn)); - } - iounmap(hpa); - } -#endif - - if (fake_pci_tail == &fake_pci_head) - return; - - /* - * Replace PCI ops, but only if we made fake devices. 
- */ - orig_pci_ops = pci_conf; - pci_conf = hp_pci_conf; -} - -extern void sba_init(void); - void hpzx1_pci_fixup (int phase) { diff -Nru a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S --- a/arch/ia64/ia32/ia32_entry.S Thu May 30 21:28:59 2002 +++ b/arch/ia64/ia32/ia32_entry.S Thu May 30 21:28:59 2002 @@ -40,12 +40,18 @@ zxt4 out1=in1 // newsp mov out3=16 // stacksize (compensates for 16-byte scratch area) adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s - zxt4 out0=in0 // out0 = clone_flags - br.call.sptk.many rp=do_fork_WITHOUT_CLONE_IDLETASK // FIXME: mask out CLONE_IDLETASK from flags, and return value now task_struct *. + dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK + br.call.sptk.many rp=do_fork .ret0: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack + mov r2=-1000 + adds r3=IA64_TASK_PID_OFFSET,r8 + ;; + cmp.leu p6,p0=r8,r2 mov ar.pfs=loc1 mov rp=loc0 + ;; +(p6) ld4 r8=[r3] br.ret.sptk.many rp END(ia32_clone) @@ -167,11 +173,17 @@ mov out1=0 mov out3=0 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s - br.call.sptk.few rp=do_fork_FIXME_RETURNS_TASK_STRUCT -.ret5: mov ar.pfs=loc1 - .restore sp + br.call.sptk.few rp=do_fork +.ret5: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack + mov r2=-1000 + adds r3=IA64_TASK_PID_OFFSET,r8 + ;; + cmp.leu p6,p0=r8,r2 + mov ar.pfs=loc1 mov rp=loc0 + ;; +(p6) ld4 r8=[r3] br.ret.sptk.many rp END(sys32_fork) diff -Nru a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c --- a/arch/ia64/kernel/acpi.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/acpi.c Thu May 30 21:28:59 2002 @@ -3,8 +3,8 @@ * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999,2000 Walt Drummond - * Copyright (C) 2000 Hewlett-Packard Co. - * Copyright (C) 2000 David Mosberger-Tang + * Copyright (C) 2000, 2002 Hewlett-Packard Co. + * David Mosberger-Tang * Copyright (C) 2000 Intel Corp. * Copyright (C) 2000,2001 J.I. 
Lee * Copyright (C) 2001 Paul Diefenbaugh @@ -173,7 +173,7 @@ #define ACPI_MAX_PLATFORM_IRQS 256 /* Array to record platform interrupt vectors for generic interrupt routing. */ -int platform_irq_list[ACPI_MAX_PLATFORM_IRQS]; +int platform_irq_list[ACPI_MAX_PLATFORM_IRQS] = { [0 ... ACPI_MAX_PLATFORM_IRQS - 1] = -1 }; /* * Interrupt routing API for device drivers. Provides interrupt vector for @@ -421,8 +421,6 @@ static int __init acpi_parse_madt (unsigned long phys_addr, unsigned long size) { - int i = 0; - if (!phys_addr || !size) return -EINVAL; @@ -432,11 +430,6 @@ return -ENODEV; } - /* Initialize platform interrupt vector array */ - - for (i = 0; i < ACPI_MAX_PLATFORM_IRQS; i++) - platform_irq_list[i] = -1; - /* Get base address of IPI Message Block */ if (acpi_madt->lapic_address) @@ -643,7 +636,7 @@ *vectors = NULL; *count = 0; - if (acpi_prts.count < 0) { + if (acpi_prts.count <= 0) { printk(KERN_ERR PREFIX "No PCI IRQ routing entries\n"); return -ENODEV; } @@ -660,10 +653,10 @@ list_for_each(node, &acpi_prts.entries) { entry = (struct acpi_prt_entry *)node; - vector[i].bus = (u16) entry->id.bus; - vector[i].pci_id = (u32) entry->id.dev << 16 | 0xffff; - vector[i].pin = (u8) entry->id.pin; - vector[i].irq = (u8) entry->source.index; + vector[i].bus = entry->id.bus; + vector[i].pci_id = ((u32) entry->id.dev << 16) | 0xffff; + vector[i].pin = entry->id.pin; + vector[i].irq = entry->source.index; i++; } *count = acpi_prts.count; diff -Nru a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c --- a/arch/ia64/kernel/efi.c Thu May 30 21:28:58 2002 +++ b/arch/ia64/kernel/efi.c Thu May 30 21:28:58 2002 @@ -5,7 +5,7 @@ * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999-2001 Hewlett-Packard Co. + * Copyright (C) 1999-2002 Hewlett-Packard Co. 
* David Mosberger-Tang * Stephane Eranian * @@ -212,8 +212,8 @@ void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; - int pal_code_count=0; - u64 mask, flags; + int pal_code_count = 0; + u64 mask, psr; u64 vaddr; efi_map_start = __va(ia64_boot_param->efi_memmap); @@ -266,10 +266,10 @@ /* * Cannot write to CRx with PSR.ic=1 */ - ia64_clear_ic(flags); + psr = ia64_clear_ic(); ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask, pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT); - local_irq_restore(flags); + ia64_set_psr(psr); ia64_srlz_i(); } } @@ -485,7 +485,7 @@ } u32 -efi_mem_type (u64 phys_addr) +efi_mem_type (unsigned long phys_addr) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; @@ -506,7 +506,7 @@ } u64 -efi_mem_attributes (u64 phys_addr) +efi_mem_attributes (unsigned long phys_addr) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; diff -Nru a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S --- a/arch/ia64/kernel/efi_stub.S Thu May 30 21:28:58 2002 +++ b/arch/ia64/kernel/efi_stub.S Thu May 30 21:28:58 2002 @@ -53,23 +53,21 @@ mov loc4=ar.rsc // save RSE configuration mov ar.rsc=0 // put RSE in enforced lazy, LE mode ;; - ld8 gp=[in0] // load EFI function's global pointer - mov out0=in1 - mov out1=in2 movl r16=PSR_BITS_TO_CLEAR - mov loc3=psr // save processor status word movl r17=PSR_BITS_TO_SET ;; - mov out2=in3 or loc3=loc3,r17 mov b6=r2 ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared - mov out3=in4 br.call.sptk.many rp=ia64_switch_mode .ret0: mov out4=in5 + mov out0=in1 + mov out1=in2 + mov out2=in3 + mov out3=in4 mov out5=in6 mov out6=in7 br.call.sptk.many rp=b6 // call the EFI function diff -Nru a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S --- a/arch/ia64/kernel/entry.S Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/entry.S Thu May 30 21:28:59 2002 @@ -100,12 +100,18 @@ mov out1=in1 mov out3=in2 adds out2=IA64_SWITCH_STACK_SIZE+16,sp // 
out2 = ®s - mov out0=in0 // out0 = clone_flags - br.call.sptk.many rp=do_fork_WITHOUT_CLONE_IDLETASK // FIXME: mask out CLONE_IDLETASK from flags, and now returns task_struct *. + dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK + br.call.sptk.many rp=do_fork .ret1: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack + mov r2=-1000 + adds r3=IA64_TASK_PID_OFFSET,r8 + ;; + cmp.leu p6,p0=r8,r2 mov ar.pfs=loc1 mov rp=loc0 + ;; +(p6) ld4 r8=[r3] br.ret.sptk.many rp END(sys_clone2) @@ -119,12 +125,18 @@ mov out1=in1 mov out3=16 // stacksize (compensates for 16-byte scratch area) adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s - mov out0=in0 // out0 = clone_flags - br.call.sptk.many rp=do_fork_WITHOUT_CLONE_IDLETASK // FIXME: mask out CLONE_IDLETASK from flags, and now return task_struct *. + dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK + br.call.sptk.many rp=do_fork .ret2: .restore sp adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack + mov r2=-1000 + adds r3=IA64_TASK_PID_OFFSET,r8 + ;; + cmp.leu p6,p0=r8,r2 mov ar.pfs=loc1 mov rp=loc0 + ;; +(p6) ld4 r8=[r3] br.ret.sptk.many rp END(sys_clone) diff -Nru a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S --- a/arch/ia64/kernel/gate.S Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/gate.S Thu May 30 21:28:59 2002 @@ -13,7 +13,7 @@ #include #include - .section .text.gate,"ax" + .section .text.gate, "ax" # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) @@ -108,7 +108,7 @@ dep r8=0,r8,38,26 // clear EC0, CPL0 and reserved bits adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp ;; - .spillsp ar.pfs, CFM_OFF + .spillsp ar.pfs, CFM_OFF+SIGCONTEXT_OFF st8 [base0]=r8 // save CFM0 adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp ;; diff -Nru a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c --- a/arch/ia64/kernel/iosapic.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/iosapic.c Thu May 30 21:28:59 
2002 @@ -24,6 +24,8 @@ * /proc/irq/#/smp_affinity * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing. * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq + * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping + * error */ /* * Here is what the interrupt logic between a PCI device and the CPU looks like: @@ -112,7 +114,7 @@ int i; for (i = 0; i < num_iosapic; i++) { - if ((irq - iosapic_lists[i].base_irq) < iosapic_lists[i].max_pin) + if ((unsigned) (irq - iosapic_lists[i].base_irq) <= iosapic_lists[i].max_pin) return i; } @@ -138,7 +140,7 @@ * Map PCI pin to the corresponding IA-64 interrupt vector. If no such mapping exists, * return -1. */ -static int +int pci_pin_to_vector (int bus, int slot, int pci_pin) { struct pci_vector_struct *r; @@ -630,7 +632,7 @@ } } -void __init +static void __init iosapic_init_pci_irq (void) { int i, index, vector, pin; diff -Nru a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c --- a/arch/ia64/kernel/irq.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/irq.c Thu May 30 21:28:59 2002 @@ -1197,7 +1197,7 @@ { char name [MAX_NAMELEN]; - if (!root_irq_dir || (irq_desc(irq)->handler == &no_irq_type)) + if (!root_irq_dir || (irq_desc(irq)->handler == &no_irq_type) || irq_dir[irq]) return; memset(name, 0, MAX_NAMELEN); diff -Nru a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S --- a/arch/ia64/kernel/pal.S Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/pal.S Thu May 30 21:28:59 2002 @@ -216,7 +216,7 @@ mov out3 = in3 // copy arg3 ;; mov loc3 = psr // save psr - ;; + ;; mov loc4=ar.rsc // save RSE configuration dep.z loc2=loc2,0,61 // convert pal entry point to physical ;; diff -Nru a/arch/ia64/kernel/pci.c b/arch/ia64/kernel/pci.c --- a/arch/ia64/kernel/pci.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/pci.c Thu May 30 21:28:59 2002 @@ -199,7 +199,7 @@ return; } -void __init +static int __init pcibios_init (void) { # define PCI_BUSES_TO_SCAN 255 @@ -218,9 +218,10 @@ pci_scan_bus(i, pci_root_ops, 
NULL); platform_pci_fixup(1); /* phase 1 fixups (after buses scanned) */ - - return; + return 0; } + +subsys_initcall(pcibios_init); /* * Called after each bus is probed, but before its children diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c --- a/arch/ia64/kernel/perfmon.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/perfmon.c Thu May 30 21:28:59 2002 @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -38,11 +39,11 @@ #ifdef CONFIG_PERFMON /* - * For PMUs which rely on the debug registers for some features, you - * must enable the following flag to activate the support for + * For PMUs which rely on the debug registers for some features, you must + * you must enable the following flag to activate the support for * accessing the registers via the perfmonctl() interface. */ -#ifdef CONFIG_ITANIUM +#if defined(CONFIG_ITANIUM) || defined(CONFIG_MCKINLEY) #define PFM_PMU_USES_DBR 1 #endif @@ -68,26 +69,27 @@ #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_soft_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY) #define PFM_FL_INHERIT_MASK (PFM_FL_INHERIT_NONE|PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL) +/* i assume unsigned */ #define PMC_IS_IMPL(i) (i>6] & (1UL<< (i) %64)) #define PMD_IS_IMPL(i) (i>6)] & (1UL<<(i) % 64)) -#define PMD_IS_COUNTING(i) (i >=0 && i < 256 && pmu_conf.counter_pmds[i>>6] & (1UL <<(i) % 64)) -#define PMC_IS_COUNTING(i) PMD_IS_COUNTING(i) +/* XXX: these three assume that register i is implemented */ +#define PMD_IS_COUNTING(i) (pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING) +#define PMC_IS_COUNTING(i) (pmu_conf.pmc_desc[i].type == PFM_REG_COUNTING) +#define PMC_IS_MONITOR(c) (pmu_conf.pmc_desc[i].type == PFM_REG_MONITOR) +/* k assume unsigned */ #define IBR_IS_IMPL(k) (kpmc_es == PMU_BTB_EVENT) - -#define LSHIFT(x) (1UL<<(x)) -#define PMM(x) LSHIFT(x) -#define PMC_IS_MONITOR(c) ((pmu_conf.monitor_pmcs[0] & PMM((c))) != 0) - #define CTX_IS_ENABLED(c) ((c)->ctx_flags.state == PFM_CTX_ENABLED) #define 
CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0) #define CTX_INHERIT_MODE(c) ((c)->ctx_fl_inherit) #define CTX_HAS_SMPL(c) ((c)->ctx_psb != NULL) -#define CTX_USED_PMD(ctx,n) (ctx)->ctx_used_pmds[(n)>>6] |= 1UL<< ((n) % 64) +/* XXX: does not support more than 64 PMDs */ +#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask) +#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL) + #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64) #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64) @@ -104,17 +106,29 @@ #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0) +#ifdef CONFIG_SMP +#define cpu_is_online(i) (cpu_online_map & (1UL << i)) +#else +#define cpu_is_online(i) (i==0) +#endif + /* * debugging */ #define DBprintk(a) \ do { \ - if (pfm_debug_mode >0) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ + if (pfm_sysctl.debug >0) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ + } while (0) + +#define DBprintk_ovfl(a) \ + do { \ + if (pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ } while (0) + /* - * These are some helpful architected PMC and IBR/DBR register layouts + * Architected PMC structure */ typedef struct { unsigned long pmc_plm:4; /* privilege level mask */ @@ -139,41 +153,40 @@ typedef struct _pfm_smpl_buffer_desc { spinlock_t psb_lock; /* protection lock */ unsigned long psb_refcnt; /* how many users for the buffer */ - int psb_flags; /* bitvector of flags */ + int psb_flags; /* bitvector of flags (not yet used) */ void *psb_addr; /* points to location of first entry */ unsigned long psb_entries; /* maximum number of entries */ unsigned long psb_size; /* aligned size of buffer */ unsigned long psb_index; /* next free entry slot XXX: must use the one in buffer */ 
unsigned long psb_entry_size; /* size of each entry including entry header */ + perfmon_smpl_hdr_t *psb_hdr; /* points to sampling buffer header */ struct _pfm_smpl_buffer_desc *psb_next; /* next psb, used for rvfreeing of psb_hdr */ } pfm_smpl_buffer_desc_t; +/* + * psb_flags + */ +#define PSB_HAS_VMA 0x1 /* a virtual mapping for the buffer exists */ + #define LOCK_PSB(p) spin_lock(&(p)->psb_lock) #define UNLOCK_PSB(p) spin_unlock(&(p)->psb_lock) -#define PFM_PSB_VMA 0x1 /* a VMA is describing the buffer */ - /* - * This structure is initialized at boot time and contains - * a description of the PMU main characteristic as indicated - * by PAL + * The possible type of a PMU register */ -typedef struct { - unsigned long pfm_is_disabled; /* indicates if perfmon is working properly */ - unsigned long perf_ovfl_val; /* overflow value for generic counters */ - unsigned long max_counters; /* upper limit on counter pair (PMC/PMD) */ - unsigned long num_pmcs ; /* highest PMC implemented (may have holes) */ - unsigned long num_pmds; /* highest PMD implemented (may have holes) */ - unsigned long impl_regs[16]; /* buffer used to hold implememted PMC/PMD mask */ - unsigned long num_ibrs; /* number of instruction debug registers */ - unsigned long num_dbrs; /* number of data debug registers */ - unsigned long monitor_pmcs[4]; /* which pmc are controlling monitors */ - unsigned long counter_pmds[4]; /* which pmd are used as counters */ -} pmu_config_t; +typedef enum { + PFM_REG_NOTIMPL, /* not implemented */ + PFM_REG_NONE, /* end marker */ + PFM_REG_MONITOR, /* a PMC with a pmc.pm field only */ + PFM_REG_COUNTING,/* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */ + PFM_REG_CONTROL, /* PMU control register */ + PFM_REG_CONFIG, /* refine configuration */ + PFM_REG_BUFFER /* PMD used as buffer */ +} pfm_pmu_reg_type_t; /* * 64-bit software counter structure @@ -221,9 +234,11 @@ struct semaphore ctx_restart_sem; /* use for blocking notification mode */ - unsigned long 
ctx_used_pmds[4]; /* bitmask of used PMD (speedup ctxsw) */ - unsigned long ctx_saved_pmcs[4]; /* bitmask of PMC to save on ctxsw */ - unsigned long ctx_reload_pmcs[4]; /* bitmask of PMC to reload on ctxsw (SMP) */ + unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ + unsigned long ctx_reload_pmds[4]; /* bitmask of PMD to reload on ctxsw */ + + unsigned long ctx_used_pmcs[4]; /* bitmask PMC used by context */ + unsigned long ctx_reload_pmcs[4]; /* bitmask of PMC to reload on ctxsw */ unsigned long ctx_used_ibrs[4]; /* bitmask of used IBR (speedup ctxsw) */ unsigned long ctx_used_dbrs[4]; /* bitmask of used DBR (speedup ctxsw) */ @@ -235,6 +250,7 @@ unsigned long ctx_cpu; /* cpu to which perfmon is applied (system wide) */ atomic_t ctx_saving_in_progress; /* flag indicating actual save in progress */ + atomic_t ctx_is_busy; /* context accessed by overflow handler */ atomic_t ctx_last_cpu; /* CPU id of current or last CPU used */ } pfm_context_t; @@ -250,16 +266,54 @@ * mostly used to synchronize between system wide and per-process */ typedef struct { - spinlock_t pfs_lock; /* lock the structure */ + spinlock_t pfs_lock; /* lock the structure */ - unsigned long pfs_task_sessions; /* number of per task sessions */ - unsigned long pfs_sys_sessions; /* number of per system wide sessions */ - unsigned long pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */ - unsigned long pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */ - struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */ + unsigned long pfs_task_sessions; /* number of per task sessions */ + unsigned long pfs_sys_sessions; /* number of per system wide sessions */ + unsigned long pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */ + unsigned long pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */ + struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a 
system-wide session */ } pfm_session_t; /* + * information about a PMC or PMD. + * dep_pmd[]: a bitmask of dependent PMD registers + * dep_pmc[]: a bitmask of dependent PMC registers + */ +typedef struct { + pfm_pmu_reg_type_t type; + int pm_pos; + int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs); + int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs); + unsigned long dep_pmd[4]; + unsigned long dep_pmc[4]; +} pfm_reg_desc_t; +/* assume cnum is a valid monitor */ +#define PMC_PM(cnum, val) (((val) >> (pmu_conf.pmc_desc[cnum].pm_pos)) & 0x1) +#define PMC_WR_FUNC(cnum) (pmu_conf.pmc_desc[cnum].write_check) +#define PMD_WR_FUNC(cnum) (pmu_conf.pmd_desc[cnum].write_check) +#define PMD_RD_FUNC(cnum) (pmu_conf.pmd_desc[cnum].read_check) + +/* + * This structure is initialized at boot time and contains + * a description of the PMU main characteristic as indicated + * by PAL along with a list of inter-registers dependencies and configurations. + */ +typedef struct { + unsigned long pfm_is_disabled; /* indicates if perfmon is working properly */ + unsigned long perf_ovfl_val; /* overflow value for generic counters */ + unsigned long max_counters; /* upper limit on counter pair (PMC/PMD) */ + unsigned long num_pmcs ; /* highest PMC implemented (may have holes) */ + unsigned long num_pmds; /* highest PMD implemented (may have holes) */ + unsigned long impl_regs[16]; /* buffer used to hold implememted PMC/PMD mask */ + unsigned long num_ibrs; /* number of instruction debug registers */ + unsigned long num_dbrs; /* number of data debug registers */ + pfm_reg_desc_t *pmc_desc; /* detailed PMC register descriptions */ + pfm_reg_desc_t *pmd_desc; /* detailed PMD register descriptions */ +} pmu_config_t; + + +/* * structure used to pass argument to/from remote CPU * using IPI to check and possibly save the PMU context on SMP systems. 
* @@ -301,22 +355,52 @@ #define PFM_CMD_NARG(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_narg) #define PFM_CMD_ARG_SIZE(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_argsize) +typedef struct { + int debug; /* turn on/off debugging via syslog */ + int debug_ovfl; /* turn on/off debug printk in overflow handler */ + int fastctxsw; /* turn on/off fast (unsecure) ctxsw */ +} pfm_sysctl_t; + +typedef struct { + unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ + unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ + unsigned long pfm_recorded_samples_count; + unsigned long pfm_full_smpl_buffer_count; /* how many times the sampling buffer was full */ +} pfm_stats_t; /* * perfmon internal variables */ static pmu_config_t pmu_conf; /* PMU configuration */ -static int pfm_debug_mode; /* 0= nodebug, >0= debug output on */ static pfm_session_t pfm_sessions; /* global sessions information */ static struct proc_dir_entry *perfmon_dir; /* for debug only */ -static unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ -static unsigned long pfm_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ -static unsigned long pfm_recorded_samples_count; - +static pfm_stats_t pfm_stats; +int __per_cpu_data pfm_syst_wide; +static int __per_cpu_data pfm_dcr_pp; + +/* sysctl() controls */ +static pfm_sysctl_t pfm_sysctl; + +static ctl_table pfm_ctl_table[]={ + {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, + {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, + {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,}, + { 0, }, +}; +static ctl_table pfm_sysctl_dir[] = { + {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, }, + {0,}, +}; +static ctl_table pfm_sysctl_root[] = { + {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, }, + {0,}, +}; +static struct ctl_table_header *pfm_sysctl_header; static unsigned 
long reset_pmcs[IA64_NUM_PMC_REGS]; /* contains PAL reset values for PMCS */ static void pfm_vm_close(struct vm_area_struct * area); + static struct vm_operations_struct pfm_vm_ops={ close: pfm_vm_close }; @@ -339,6 +423,14 @@ #endif static void pfm_lazy_save_regs (struct task_struct *ta); +#if defined(CONFIG_ITANIUM) +#include "perfmon_itanium.h" +#elif defined(CONFIG_MCKINLEY) +#include "perfmon_mckinley.h" +#else +#include "perfmon_generic.h" +#endif + static inline unsigned long pfm_read_soft_counter(pfm_context_t *ctx, int i) { @@ -353,7 +445,7 @@ * writing to unimplemented part is ignore, so we do not need to * mask off top part */ - ia64_set_pmd(i, val); + ia64_set_pmd(i, val & pmu_conf.perf_ovfl_val); } /* @@ -388,7 +480,8 @@ } /* Here we want the physical address of the memory. - * This is used when initializing the contents of the area. + * This is used when initializing the contents of the + * area and marking the pages as reserved. */ static inline unsigned long pfm_kvirt_to_pa(unsigned long adr) @@ -398,7 +491,6 @@ return pa; } - static void * pfm_rvmalloc(unsigned long size) { @@ -473,7 +565,7 @@ * * This function cannot remove the buffer from here, because exit_mmap() must first * complete. Given that there is no other vma related callback in the generic code, - * we have created on own with the linked list of sampling buffer to free which + * we have created our own with the linked list of sampling buffers to free. The list * is part of the thread structure. In release_thread() we check if the list is * empty. If not we call into perfmon to free the buffer and psb. 
That is the only * way to ensure a safe deallocation of the sampling buffer which works when @@ -489,16 +581,15 @@ psb->psb_next = current->thread.pfm_smpl_buf_list; current->thread.pfm_smpl_buf_list = psb; - DBprintk(("psb for [%d] smpl @%p size %ld inserted into list\n", - current->pid, psb->psb_hdr, psb->psb_size)); + DBprintk(("[%d] add smpl @%p size %lu to smpl_buf_list psb_flags=0x%x\n", + current->pid, psb->psb_hdr, psb->psb_size, psb->psb_flags)); } - DBprintk(("psb vma flag cleared for [%d] smpl @%p size %ld inserted into list\n", - current->pid, psb->psb_hdr, psb->psb_size)); - + DBprintk(("[%d] clearing psb_flags=0x%x smpl @%p size %lu\n", + current->pid, psb->psb_flags, psb->psb_hdr, psb->psb_size)); /* - * indicate to pfm_context_exit() that the vma has been removed. + * decrement the number vma for the buffer */ - psb->psb_flags &= ~PFM_PSB_VMA; + psb->psb_flags &= ~PSB_HAS_VMA; UNLOCK_PSB(psb); } @@ -521,7 +612,7 @@ printk("perfmon: invalid context mm=%p\n", task->mm); return -1; } - psb = ctx->ctx_psb; + psb = ctx->ctx_psb; down_write(&task->mm->mmap_sem); @@ -532,14 +623,9 @@ printk("perfmon: pid %d unable to unmap sampling buffer @0x%lx size=%ld\n", task->pid, ctx->ctx_smpl_vaddr, psb->psb_size); } - DBprintk(("[%d] do_unmap(0x%lx, %ld)=%d\n", - task->pid, ctx->ctx_smpl_vaddr, psb->psb_size, r)); - /* - * make sure we suppress all traces of this buffer - * (important for pfm_inherit) - */ - ctx->ctx_smpl_vaddr = 0; + DBprintk(("[%d] do_unmap(0x%lx, %ld)=%d refcnt=%lu psb_flags=0x%x\n", + task->pid, ctx->ctx_smpl_vaddr, psb->psb_size, r, psb->psb_refcnt, psb->psb_flags)); return 0; } @@ -572,7 +658,7 @@ while (size > 0) { page = pfm_kvirt_to_pa(buf); - if (remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_SHARED)) return -ENOMEM; + if (remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM; addr += PAGE_SIZE; buf += PAGE_SIZE; @@ -611,17 +697,25 @@ void *smpl_buf; pfm_smpl_buffer_desc_t *psb; - regcount = 
pfm_smpl_entry_size(which_pmds, 1); /* note that regcount might be 0, in this case only the header for each * entry will be recorded. */ + regcount = pfm_smpl_entry_size(which_pmds, 1); + + if ((sizeof(perfmon_smpl_hdr_t)+ entries*sizeof(perfmon_smpl_entry_t)) <= entries) { + DBprintk(("requested entries %lu is too big\n", entries)); + return -EINVAL; + } /* * 1 buffer hdr and for each entry a header + regcount PMDs to save */ size = PAGE_ALIGN( sizeof(perfmon_smpl_hdr_t) + entries * (sizeof(perfmon_smpl_entry_t) + regcount*sizeof(u64))); + + DBprintk(("sampling buffer size=%lu bytes\n", size)); + /* * check requested size to avoid Denial-of-service attacks * XXX: may have to refine this test @@ -661,8 +755,13 @@ } /* * partially initialize the vma for the sampling buffer + * + * The VM_DONTCOPY flag is very important as it ensures that the mapping + * will never be inherited for any child process (via fork()) which is always + * what we want. */ - vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; + vma->vm_mm = mm; + vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED|VM_DONTCOPY; vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ vma->vm_ops = &pfm_vm_ops; /* necesarry to get the close() callback */ vma->vm_pgoff = 0; @@ -680,8 +779,8 @@ psb->psb_size = size; /* aligned size */ psb->psb_index = 0; psb->psb_entries = entries; - psb->psb_flags = PFM_PSB_VMA; /* remember that there is a vma describing the buffer */ psb->psb_refcnt = 1; + psb->psb_flags = PSB_HAS_VMA; spin_lock_init(&psb->psb_lock); @@ -691,9 +790,9 @@ */ psb->psb_entry_size = sizeof(perfmon_smpl_entry_t) + regcount*sizeof(u64); - DBprintk(("psb @%p entry_size=%ld hdr=%p addr=%p\n", + DBprintk(("psb @%p entry_size=%ld hdr=%p addr=%p refcnt=%lu psb_flags=0x%x\n", (void *)psb,psb->psb_entry_size, (void *)psb->psb_hdr, - (void *)psb->psb_addr)); + (void *)psb->psb_addr, psb->psb_refcnt, psb->psb_flags)); /* initialize some of the fields of user visible buffer header */ 
psb->psb_hdr->hdr_version = PFM_SMPL_VERSION; @@ -785,6 +884,11 @@ } ctx_flags = pfx->ctx_flags; + if ((ctx_flags & PFM_FL_INHERIT_MASK) == (PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)) { + DBprintk(("invalid inherit mask 0x%x\n",ctx_flags & PFM_FL_INHERIT_MASK)); + return -EINVAL; + } + if (ctx_flags & PFM_FL_SYSTEM_WIDE) { DBprintk(("cpu_mask=0x%lx\n", pfx->ctx_cpu_mask)); /* @@ -804,8 +908,8 @@ /* * and it must be a valid CPU */ - cpu = ffs(pfx->ctx_cpu_mask); - if (cpu > smp_num_cpus) { + cpu = ffz(~pfx->ctx_cpu_mask); + if (cpu_is_online(cpu) == 0) { DBprintk(("CPU%d is not online\n", cpu)); return -EINVAL; } @@ -823,7 +927,15 @@ * must provide a target for the signal in blocking mode even when * no counter is configured with PFM_FL_REG_OVFL_NOTIFY */ - if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) return -EINVAL; + if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) { + DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid)); + return -EINVAL; + } + + if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) { + DBprintk(("cannot notify self when blocking for [%d]\n", task->pid)); + return -EINVAL; + } } /* probably more to add here */ @@ -831,7 +943,7 @@ } static int -pfm_create_context(struct task_struct *task, pfm_context_t *ctx, void *req, int count, +pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int count, struct pt_regs *regs) { pfarg_context_t tmp; @@ -862,7 +974,7 @@ if (ctx_flags & PFM_FL_SYSTEM_WIDE) { /* at this point, we know there is at least one bit set */ - cpu = ffs(tmp.ctx_cpu_mask) - 1; + cpu = ffz(~tmp.ctx_cpu_mask); DBprintk(("requesting CPU%d currently on CPU%d\n",cpu, smp_processor_id())); @@ -956,7 +1068,7 @@ } if (tmp.ctx_smpl_entries) { - DBprintk(("sampling entries=%ld\n",tmp.ctx_smpl_entries)); + DBprintk(("sampling entries=%lu\n",tmp.ctx_smpl_entries)); ret = pfm_smpl_buffer_alloc(ctx, tmp.ctx_smpl_regs, tmp.ctx_smpl_entries, &uaddr); @@ 
-982,20 +1094,12 @@ atomic_set(&ctx->ctx_last_cpu,-1); /* SMP only, means no CPU */ - /* - * Keep track of the pmds we want to sample - * XXX: may be we don't need to save/restore the DEAR/IEAR pmds - * but we do need the BTB for sure. This is because of a hardware - * buffer of 1 only for non-BTB pmds. - * - * We ignore the unimplemented pmds specified by the user - */ - ctx->ctx_used_pmds[0] = tmp.ctx_smpl_regs[0] & pmu_conf.impl_regs[4]; - ctx->ctx_saved_pmcs[0] = 1; /* always save/restore PMC[0] */ + /* may be redudant with memset() but at least it's easier to remember */ + atomic_set(&ctx->ctx_saving_in_progress, 0); + atomic_set(&ctx->ctx_is_busy, 0); sema_init(&ctx->ctx_restart_sem, 0); /* init this semaphore to locked */ - if (copy_to_user(req, &tmp, sizeof(tmp))) { ret = -EFAULT; goto buffer_error; @@ -1097,21 +1201,22 @@ current->pid, flag == PFM_RELOAD_LONG_RESET ? "long" : "short", i, val)); } + ia64_srlz_d(); /* just in case ! */ ctx->ctx_ovfl_regs[0] = 0UL; } static int -pfm_write_pmcs(struct task_struct *ta, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) +pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { - struct thread_struct *th = &ta->thread; + struct thread_struct *th = &task->thread; pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg; unsigned int cnum; int i; int ret = 0, reg_retval = 0; /* we don't quite support this right now */ - if (ta != current) return -EINVAL; + if (task != current) return -EINVAL; if (!CTX_IS_ENABLED(ctx)) return -EINVAL; @@ -1140,30 +1245,30 @@ * - per-task : user monitor * any other configuration is rejected. 
*/ - if (PMC_IS_MONITOR(cnum)) { - pfm_monitor_t *p = (pfm_monitor_t *)&tmp.reg_value; - - DBprintk(("pmc[%u].pm = %d\n", cnum, p->pmc_pm)); + if (PMC_IS_MONITOR(cnum) || PMC_IS_COUNTING(cnum)) { + DBprintk(("pmc[%u].pm=%ld\n", cnum, PMC_PM(cnum, tmp.reg_value))); - if (ctx->ctx_fl_system ^ p->pmc_pm) { - //if ((ctx->ctx_fl_system == 1 && p->pmc_pm == 0) - // ||(ctx->ctx_fl_system == 0 && p->pmc_pm == 1)) { + if (ctx->ctx_fl_system ^ PMC_PM(cnum, tmp.reg_value)) { + DBprintk(("pmc_pm=%ld fl_system=%d\n", PMC_PM(cnum, tmp.reg_value), ctx->ctx_fl_system)); ret = -EINVAL; goto abort_mission; } - /* - * enforce generation of overflow interrupt. Necessary on all - * CPUs which do not implement 64-bit hardware counters. - */ - p->pmc_oi = 1; } if (PMC_IS_COUNTING(cnum)) { + pfm_monitor_t *p = (pfm_monitor_t *)&tmp.reg_value; + /* + * enforce generation of overflow interrupt. Necessary on all + * CPUs. + */ + p->pmc_oi = 1; + if (tmp.reg_flags & PFM_REGFL_OVFL_NOTIFY) { /* * must have a target for the signal */ if (ctx->ctx_notify_task == NULL) { + DBprintk(("no notify_task && PFM_REGFL_OVFL_NOTIFY\n")); ret = -EINVAL; goto abort_mission; } @@ -1177,14 +1282,11 @@ ctx->ctx_soft_pmds[cnum].reset_pmds[1] = tmp.reg_reset_pmds[1]; ctx->ctx_soft_pmds[cnum].reset_pmds[2] = tmp.reg_reset_pmds[2]; ctx->ctx_soft_pmds[cnum].reset_pmds[3] = tmp.reg_reset_pmds[3]; - - /* - * needed in case the user does not initialize the equivalent - * PMD. Clearing is done in reset_pmu() so there is no possible - * leak here. - */ - CTX_USED_PMD(ctx, cnum); } + /* + * execute write checker, if any + */ + if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs); abort_mission: if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; @@ -1204,14 +1306,21 @@ */ if (ret != 0) { DBprintk(("[%d] pmc[%u]=0x%lx error %d\n", - ta->pid, cnum, tmp.reg_value, reg_retval)); + task->pid, cnum, tmp.reg_value, reg_retval)); break; } /* * We can proceed with this register! 
*/ - + + /* + * Needed in case the user does not initialize the equivalent + * PMD. Clearing is done in reset_pmu() so there is no possible + * leak here. + */ + CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]); + /* * keep copy the pmc, used for register reload */ @@ -1219,17 +1328,17 @@ ia64_set_pmc(cnum, tmp.reg_value); - DBprintk(("[%d] pmc[%u]=0x%lx flags=0x%x save_pmcs=0%lx reload_pmcs=0x%lx\n", - ta->pid, cnum, tmp.reg_value, + DBprintk(("[%d] pmc[%u]=0x%lx flags=0x%x used_pmds=0x%lx\n", + task->pid, cnum, tmp.reg_value, ctx->ctx_soft_pmds[cnum].flags, - ctx->ctx_saved_pmcs[0], ctx->ctx_reload_pmcs[0])); + ctx->ctx_used_pmds[0])); } return ret; } static int -pfm_write_pmds(struct task_struct *ta, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) +pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg; unsigned int cnum; @@ -1237,7 +1346,7 @@ int ret = 0, reg_retval = 0; /* we don't quite support this right now */ - if (ta != current) return -EINVAL; + if (task != current) return -EINVAL; /* * Cannot do anything before PMU is enabled @@ -1252,7 +1361,6 @@ if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT; cnum = tmp.reg_num; - if (!PMD_IS_IMPL(cnum)) { ret = -EINVAL; goto abort_mission; @@ -1266,6 +1374,10 @@ ctx->ctx_soft_pmds[cnum].short_reset = tmp.reg_short_reset; } + /* + * execute write checker, if any + */ + if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs); abort_mission: if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL; @@ -1282,21 +1394,24 @@ */ if (ret != 0) { DBprintk(("[%d] pmc[%u]=0x%lx error %d\n", - ta->pid, cnum, tmp.reg_value, reg_retval)); + task->pid, cnum, tmp.reg_value, reg_retval)); break; } /* keep track of what we use */ - CTX_USED_PMD(ctx, cnum); + CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]); + /* mark this register as used as well */ + CTX_USED_PMD(ctx, 
RDEP(cnum)); /* writes to unimplemented part is ignored, so this is safe */ - ia64_set_pmd(cnum, tmp.reg_value); + ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val); /* to go away */ ia64_srlz_d(); + DBprintk(("[%d] pmd[%u]: soft_pmd=0x%lx short_reset=0x%lx " "long_reset=0x%lx hw_pmd=%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx\n", - ta->pid, cnum, + task->pid, cnum, ctx->ctx_soft_pmds[cnum].val, ctx->ctx_soft_pmds[cnum].short_reset, ctx->ctx_soft_pmds[cnum].long_reset, @@ -1309,12 +1424,13 @@ } static int -pfm_read_pmds(struct task_struct *ta, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) +pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { - struct thread_struct *th = &ta->thread; + struct thread_struct *th = &task->thread; unsigned long val=0; pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg; - int i; + unsigned int cnum; + int i, ret = 0; if (!CTX_IS_ENABLED(ctx)) return -EINVAL; @@ -1327,14 +1443,25 @@ /* XXX: ctx locking may be required here */ - DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), ta->pid)); + DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid)); for (i = 0; i < count; i++, req++) { - unsigned long reg_val = ~0UL, ctx_val = ~0UL; + unsigned long ctx_val = ~0UL; if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT; - if (!PMD_IS_IMPL(tmp.reg_num)) goto abort_mission; + cnum = tmp.reg_num; + + if (!PMD_IS_IMPL(cnum)) goto abort_mission; + /* + * we can only read the register that we use. That includes + * the one we explicitely initialize AND the one we want included + * in the sampling buffer (smpl_regs). 
+ * + * Having this restriction allows optimization in the ctxsw routine + * without compromising security (leaks) + */ + if (!CTX_IS_USED_PMD(ctx, cnum)) goto abort_mission; /* * If the task is not the current one, then we check if the @@ -1343,8 +1470,8 @@ */ if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()){ ia64_srlz_d(); - val = reg_val = ia64_get_pmd(tmp.reg_num); - DBprintk(("reading pmd[%u]=0x%lx from hw\n", tmp.reg_num, val)); + val = ia64_get_pmd(cnum); + DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val)); } else { #ifdef CONFIG_SMP int cpu; @@ -1360,30 +1487,35 @@ */ cpu = atomic_read(&ctx->ctx_last_cpu); if (cpu != -1) { - DBprintk(("must fetch on CPU%d for [%d]\n", cpu, ta->pid)); - pfm_fetch_regs(cpu, ta, ctx); + DBprintk(("must fetch on CPU%d for [%d]\n", cpu, task->pid)); + pfm_fetch_regs(cpu, task, ctx); } #endif /* context has been saved */ - val = reg_val = th->pmd[tmp.reg_num]; + val = th->pmd[cnum]; } - if (PMD_IS_COUNTING(tmp.reg_num)) { + if (PMD_IS_COUNTING(cnum)) { /* * XXX: need to check for overflow */ val &= pmu_conf.perf_ovfl_val; - val += ctx_val = ctx->ctx_soft_pmds[tmp.reg_num].val; - } else { + val += ctx_val = ctx->ctx_soft_pmds[cnum].val; + } - val = reg_val = ia64_get_pmd(tmp.reg_num); - } - PFM_REG_RETFLAG_SET(tmp.reg_flags, 0); tmp.reg_value = val; - DBprintk(("read pmd[%u] soft_pmd=0x%lx reg=0x%lx pmc=0x%lx\n", - tmp.reg_num, ctx_val, reg_val, - ia64_get_pmc(tmp.reg_num))); + /* + * execute read checker, if any + */ + if (PMD_RD_FUNC(cnum)) { + ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value, regs); + } + + PFM_REG_RETFLAG_SET(tmp.reg_flags, ret); + + DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n", + cnum, ret, val, ia64_get_pmc(cnum))); if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT; } @@ -1391,7 +1523,7 @@ abort_mission: PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL); /* - * XXX: if this fails, we stick we the original failure, flag not updated! 
+ * XXX: if this fails, we stick with the original failure, flag not updated! */ copy_to_user(req, &tmp, sizeof(tmp)); return -EINVAL; @@ -1426,15 +1558,11 @@ */ if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; - /* - * XXX: not pretty - */ LOCK_PFS(); /* - * We only allow the use of debug registers when there is no system - * wide monitoring - * XXX: we could relax this by + * We cannot allow setting breakpoints when system wide monitoring + * sessions are using the debug registers. */ if (pfm_sessions.pfs_sys_use_dbregs> 0) ret = -1; @@ -1459,6 +1587,7 @@ * perfmormance monitoring, so we only decrement the number * of "ptraced" debug register users to keep the count up to date */ + int pfm_release_debug_registers(struct task_struct *task) { @@ -1505,13 +1634,6 @@ */ if (!CTX_IS_ENABLED(ctx)) return -EINVAL; -#if 0 - if (ctx->ctx_fl_frozen==0) { - printk("task %d without pmu_frozen set\n", task->pid); - return -EINVAL; - } -#endif - if (task == current) { DBprintk(("restarting self %d frozen=%d \n", current->pid, ctx->ctx_fl_frozen)); @@ -1554,7 +1676,6 @@ up(sem); } else { task->thread.pfm_ovfl_block_reset = 1; - set_tsk_thread_flag(current, TIF_NOTIFY_RESUME); } #if 0 /* @@ -1629,25 +1750,35 @@ current->pid, ctx->ctx_fl_system, PMU_OWNER(), current)); + /* simply stop monitoring but not the PMU */ if (ctx->ctx_fl_system) { - __asm__ __volatile__ ("rsm psr.pp;;"::: "memory"); - /* disable dcr pp */ ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); + /* stop monitoring */ + __asm__ __volatile__ ("rsm psr.pp;;"::: "memory"); + + ia64_srlz_i(); + #ifdef CONFIG_SMP - local_cpu_data->pfm_dcr_pp = 0; + this_cpu(pfm_dcr_pp) = 0; #else pfm_tasklist_toggle_pp(0); #endif - ia64_psr(regs)->pp = 0; } else { + + /* stop monitoring */ __asm__ __volatile__ ("rum psr.up;;"::: "memory"); + ia64_srlz_i(); + + /* + * clear user level psr.up + */ ia64_psr(regs)->up = 0; } return 0; @@ -1674,7 +1805,7 @@ ia64_psr(regs)->up = 0; } /* - * goes back to default behavior + * goes back to 
default behavior: no user level control * no need to change live psr.sp because useless at the kernel level */ ia64_psr(regs)->sp = 1; @@ -1686,10 +1817,8 @@ return 0; } - - static int -pfm_destroy_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, +pfm_context_destroy(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { /* we don't quite support this right now */ @@ -1715,15 +1844,14 @@ ia64_psr(regs)->up = 0; } - /* restore security level */ - ia64_psr(regs)->sp = 1; - skipped_stop: /* * remove sampling buffer mapping, if any */ - if (ctx->ctx_smpl_vaddr) pfm_remove_smpl_mapping(task); - + if (ctx->ctx_smpl_vaddr) { + pfm_remove_smpl_mapping(task); + ctx->ctx_smpl_vaddr = 0UL; + } /* now free context and related state */ pfm_context_exit(task); @@ -1734,7 +1862,7 @@ * does nothing at the moment */ static int -pfm_unprotect_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, +pfm_context_unprotect(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) { return 0; @@ -1764,9 +1892,9 @@ { unsigned int mode = *(unsigned int *)arg; - pfm_debug_mode = mode == 0 ? 0 : 1; + pfm_sysctl.debug = mode == 0 ? 0 : 1; - printk("perfmon debugging %s\n", pfm_debug_mode ? "on" : "off"); + printk("perfmon debugging %s\n", pfm_sysctl.debug ? 
"on" : "off"); return 0; } @@ -1794,7 +1922,6 @@ dbr_mask_reg_t dbr; } dbreg_t; - static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs) { @@ -1836,8 +1963,8 @@ if (ctx->ctx_fl_system) { /* we mark ourselves as owner of the debug registers */ ctx->ctx_fl_using_dbreg = 1; - } else { - if (ctx->ctx_fl_using_dbreg == 0) { + DBprintk(("system-wide setting fl_using_dbreg for [%d]\n", task->pid)); + } else if (first_time) { ret= -EBUSY; if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) { DBprintk(("debug registers already in use for [%d]\n", task->pid)); @@ -1846,6 +1973,7 @@ /* we mark ourselves as owner of the debug registers */ ctx->ctx_fl_using_dbreg = 1; + DBprintk(("setting fl_using_dbreg for [%d]\n", task->pid)); /* * Given debug registers cannot be used for both debugging * and performance monitoring at the same time, we reuse @@ -1853,18 +1981,27 @@ */ memset(task->thread.dbr, 0, sizeof(task->thread.dbr)); memset(task->thread.ibr, 0, sizeof(task->thread.ibr)); + } - /* - * clear hardware registers to make sure we don't leak - * information and pick up stale state - */ - for (i=0; i < pmu_conf.num_ibrs; i++) { - ia64_set_ibr(i, 0UL); - } - for (i=0; i < pmu_conf.num_dbrs; i++) { - ia64_set_dbr(i, 0UL); - } + if (first_time) { + DBprintk(("[%d] clearing ibrs,dbrs\n", task->pid)); + /* + * clear hardware registers to make sure we don't + * pick up stale state. 
+ * + * for a system wide session, we do not use + * thread.dbr, thread.ibr because this process + * never leaves the current CPU and the state + * is shared by all processes running on it + */ + for (i=0; i < pmu_conf.num_ibrs; i++) { + ia64_set_ibr(i, 0UL); + } + ia64_srlz_i(); + for (i=0; i < pmu_conf.num_dbrs; i++) { + ia64_set_dbr(i, 0UL); } + ia64_srlz_d(); } ret = -EFAULT; @@ -1924,6 +2061,7 @@ CTX_USED_IBR(ctx, rnum); ia64_set_ibr(rnum, dbreg.val); + ia64_srlz_i(); thread->ibr[rnum] = dbreg.val; @@ -1932,6 +2070,7 @@ CTX_USED_DBR(ctx, rnum); ia64_set_dbr(rnum, dbreg.val); + ia64_srlz_d(); thread->dbr[rnum] = dbreg.val; @@ -2031,27 +2170,35 @@ if (ctx->ctx_fl_system) { - /* enable dcr pp */ - ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP); - #ifdef CONFIG_SMP - local_cpu_data->pfm_dcr_pp = 1; + this_cpu(pfm_dcr_pp) = 1; #else pfm_tasklist_toggle_pp(1); #endif + /* set user level psr.pp */ ia64_psr(regs)->pp = 1; + /* start monitoring at kernel level */ __asm__ __volatile__ ("ssm psr.pp;;"::: "memory"); + /* enable dcr pp */ + ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP); + + ia64_srlz_i(); + } else { if ((task->thread.flags & IA64_THREAD_PM_VALID) == 0) { printk("perfmon: pfm_start task flag not set for [%d]\n", task->pid); return -EINVAL; } + /* set user level psr.up */ ia64_psr(regs)->up = 1; + + /* start monitoring at kernel level */ __asm__ __volatile__ ("sum psr.up;;"::: "memory"); + + ia64_srlz_i(); } - ia64_srlz_d(); return 0; } @@ -2074,11 +2221,13 @@ ia64_psr(regs)->pp = 0; ia64_psr(regs)->up = 0; /* just to make sure! */ + /* make sure monitoring is stopped */ __asm__ __volatile__ ("rsm psr.pp;;"::: "memory"); + ia64_srlz_i(); #ifdef CONFIG_SMP - local_cpu_data->pfm_syst_wide = 1; - local_cpu_data->pfm_dcr_pp = 0; + this_cpu(pfm_syst_wide) = 1; + this_cpu(pfm_dcr_pp) = 0; #endif } else { /* @@ -2089,21 +2238,21 @@ ia64_psr(regs)->pp = 0; /* just to make sure! 
*/ ia64_psr(regs)->up = 0; + /* make sure monitoring is stopped */ __asm__ __volatile__ ("rum psr.up;;"::: "memory"); - /* - * allow user control (user monitors only) - if (task == ctx->ctx_owner) { - */ - { - DBprintk(("clearing psr.sp for [%d]\n", current->pid)); - ia64_psr(regs)->sp = 0; - } + ia64_srlz_i(); + + DBprintk(("clearing psr.sp for [%d]\n", current->pid)); + + /* allow user level control */ + ia64_psr(regs)->sp = 0; + + /* PMU state will be saved/restored on ctxsw */ task->thread.flags |= IA64_THREAD_PM_VALID; } SET_PMU_OWNER(task); - ctx->ctx_flags.state = PFM_CTX_ENABLED; atomic_set(&ctx->ctx_last_cpu, smp_processor_id()); @@ -2114,6 +2263,40 @@ return 0; } +static int +pfm_get_pmc_reset(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, + struct pt_regs *regs) +{ + pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg; + unsigned int cnum; + int i; + + for (i = 0; i < count; i++, req++) { + + if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT; + + cnum = tmp.reg_num; + + if (!PMC_IS_IMPL(cnum)) goto abort_mission; + + tmp.reg_value = reset_pmcs[cnum]; + + PFM_REG_RETFLAG_SET(tmp.reg_flags, 0); + + DBprintk(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, tmp.reg_value)); + + if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT; + } + return 0; +abort_mission: + PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL); + /* + * XXX: if this fails, we stick with the original failure, flag not updated! 
+ */ + copy_to_user(req, &tmp, sizeof(tmp)); + return -EINVAL; +} + /* * functions MUST be listed in the increasing order of their index (see permfon.h) */ @@ -2121,19 +2304,19 @@ /* 0 */{ NULL, 0, 0, 0}, /* not used */ /* 1 */{ pfm_write_pmcs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)}, /* 2 */{ pfm_write_pmds, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)}, -/* 3 */{ pfm_read_pmds, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)}, +/* 3 */{ pfm_read_pmds,PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)}, /* 4 */{ pfm_stop, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, /* 5 */{ pfm_start, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, /* 6 */{ pfm_enable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, /* 7 */{ pfm_disable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, -/* 8 */{ pfm_create_context, PFM_CMD_ARG_READ, 1, sizeof(pfarg_context_t)}, -/* 9 */{ pfm_destroy_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, +/* 8 */{ pfm_context_create, PFM_CMD_PID|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, 1, sizeof(pfarg_context_t)}, +/* 9 */{ pfm_context_destroy, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, /* 10 */{ pfm_restart, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_NOCHK, 0, 0}, /* 11 */{ pfm_protect_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, /* 12 */{ pfm_get_features, PFM_CMD_ARG_WRITE, 0, 0}, /* 13 */{ pfm_debug, 0, 1, sizeof(unsigned int)}, -/* 14 */{ pfm_unprotect_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, -/* 15 */{ NULL, 0, 0, 0}, /* not used */ +/* 14 */{ pfm_context_unprotect, PFM_CMD_PID|PFM_CMD_CTX, 0, 0}, +/* 15 */{ pfm_get_pmc_reset, PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)}, /* 16 */{ NULL, 0, 0, 0}, /* not used */ /* 17 */{ NULL, 0, 0, 0}, /* not used */ /* 18 */{ NULL, 0, 0, 0}, /* not used */ @@ -2167,19 +2350,10 @@ * after the task is marked as STOPPED but before pfm_save_regs() * is completed. 
*/ - for (;;) { - - task_lock(task); - if (1 /*XXX !task_has_cpu(task)*/) break; - task_unlock(task); - - do { - if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) return -EBUSY; - barrier(); - cpu_relax(); - } while (0 /*task_has_cpu(task)*/); - } - task_unlock(task); + if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) return -EBUSY; + DBprintk(("before wait_task_inactive [%d] state %ld\n", task->pid, task->state)); + wait_task_inactive(task); + DBprintk(("after wait_task_inactive [%d] state %ld\n", task->pid, task->state)); #else if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) { DBprintk(("warning [%d] not in stable state %ld\n", task->pid, task->state)); @@ -2195,9 +2369,9 @@ { struct pt_regs *regs = (struct pt_regs *)&stack; struct task_struct *task = current; - pfm_context_t *ctx = task->thread.pfm_context; + pfm_context_t *ctx; size_t sz; - int ret = -ESRCH, narg; + int ret, narg; /* * reject any call if perfmon was disabled at initialization time @@ -2227,6 +2401,8 @@ if (pid != current->pid) { + ret = -ESRCH; + read_lock(&tasklist_lock); task = find_task_by_pid(pid); @@ -2241,10 +2417,11 @@ ret = check_task_state(task); if (ret != 0) goto abort_call; } - ctx = task->thread.pfm_context; } } + ctx = task->thread.pfm_context; + if (PFM_CMD_USE_CTX(cmd)) { ret = -EINVAL; if (ctx == NULL) { @@ -2273,7 +2450,7 @@ } void -pfm_ovfl_block_reset (void) +pfm_ovfl_block_reset(void) { struct thread_struct *th = ¤t->thread; pfm_context_t *ctx = current->thread.pfm_context; @@ -2353,18 +2530,17 @@ int j; -pfm_recorded_samples_count++; idx = ia64_fetch_and_add(1, &psb->psb_index); - DBprintk(("recording index=%ld entries=%ld\n", idx-1, psb->psb_entries)); + DBprintk_ovfl(("recording index=%ld entries=%ld\n", idx-1, psb->psb_entries)); /* - * XXX: there is a small chance that we could run out on index before resetting - * but index is unsigned long, so it will take some time..... 
- * We use > instead of == because fetch_and_add() is off by one (see below) - * - * This case can happen in non-blocking mode or with multiple processes. - * For non-blocking, we need to reload and continue. - */ + * XXX: there is a small chance that we could run out on index before resetting + * but index is unsigned long, so it will take some time..... + * We use > instead of == because fetch_and_add() is off by one (see below) + * + * This case can happen in non-blocking mode or with multiple processes. + * For non-blocking, we need to reload and continue. + */ if (idx > psb->psb_entries) return 0; /* first entry is really entry 0, not 1 caused by fetch_and_add */ @@ -2375,7 +2551,7 @@ /* * initialize entry header */ - h->pid = task->pid; + h->pid = current->pid; h->cpu = smp_processor_id(); h->rate = 0; /* XXX: add the sampling rate used here */ h->ip = regs ? regs->cr_iip : 0x0; /* where did the fault happened */ @@ -2403,24 +2579,27 @@ } else { *e = ia64_get_pmd(j); /* slow */ } - DBprintk(("e=%p pmd%d =0x%lx\n", (void *)e, j, *e)); + DBprintk_ovfl(("e=%p pmd%d =0x%lx\n", (void *)e, j, *e)); e++; } + pfm_stats.pfm_recorded_samples_count++; + /* * make the new entry visible to user, needs to be atomic */ ia64_fetch_and_add(1, &psb->psb_hdr->hdr_count); - DBprintk(("index=%ld entries=%ld hdr_count=%ld\n", + DBprintk_ovfl(("index=%ld entries=%ld hdr_count=%ld\n", idx, psb->psb_entries, psb->psb_hdr->hdr_count)); /* * sampling buffer full ? */ if (idx == (psb->psb_entries-1)) { - DBprintk(("sampling buffer full\n")); + DBprintk_ovfl(("sampling buffer full\n")); /* * XXX: must reset buffer in blocking mode and lost notified */ + pfm_stats.pfm_full_smpl_buffer_count++; return 1; } return 0; @@ -2433,15 +2612,13 @@ * new value of pmc[0]. 
if 0x0 then unfreeze, else keep frozen */ static unsigned long -pfm_overflow_handler(struct task_struct *task, u64 pmc0, struct pt_regs *regs) +pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) { unsigned long mask; struct thread_struct *t; - pfm_context_t *ctx; unsigned long old_val; unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL; int i; - int my_cpu = smp_processor_id(); int ret = 1; struct siginfo si; /* @@ -2457,18 +2634,7 @@ * valid one, i.e. the one that caused the interrupt. */ - if (task == NULL) { - DBprintk(("owners[%d]=NULL\n", my_cpu)); - return 0x1; - } t = &task->thread; - ctx = task->thread.pfm_context; - - if (!ctx) { - printk("perfmon: Spurious overflow interrupt: process %d has no PFM context\n", - task->pid); - return 0; - } /* * XXX: debug test @@ -2490,12 +2656,12 @@ mask = pmc0 >> PMU_FIRST_COUNTER; - DBprintk(("pmc0=0x%lx pid=%d iip=0x%lx, %s" - " mode used_pmds=0x%lx save_pmcs=0x%lx reload_pmcs=0x%lx\n", + DBprintk_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s" + " mode used_pmds=0x%lx used_pmcs=0x%lx reload_pmcs=0x%lx\n", pmc0, task->pid, (regs ? regs->cr_iip : 0), CTX_OVFL_NOBLOCK(ctx) ? 
"nonblocking" : "blocking", ctx->ctx_used_pmds[0], - ctx->ctx_saved_pmcs[0], + ctx->ctx_used_pmcs[0], ctx->ctx_reload_pmcs[0])); /* @@ -2506,7 +2672,7 @@ /* skip pmd which did not overflow */ if ((mask & 0x1) == 0) continue; - DBprintk(("PMD[%d] overflowed hw_pmd=0x%lx soft_pmd=0x%lx\n", + DBprintk_ovfl(("pmd[%d] overflowed hw_pmd=0x%lx soft_pmd=0x%lx\n", i, ia64_get_pmd(i), ctx->ctx_soft_pmds[i].val)); /* @@ -2518,8 +2684,7 @@ old_val = ctx->ctx_soft_pmds[i].val; ctx->ctx_soft_pmds[i].val = 1 + pmu_conf.perf_ovfl_val + pfm_read_soft_counter(ctx, i); - - DBprintk(("soft_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx\n", + DBprintk_ovfl(("soft_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx\n", i, ctx->ctx_soft_pmds[i].val, old_val, ia64_get_pmd(i) & pmu_conf.perf_ovfl_val)); @@ -2536,7 +2701,7 @@ ovfl_pmds |= 1UL << i; - DBprintk(("soft_pmd[%d] overflowed flags=0x%x, ovfl=0x%lx\n", i, ctx->ctx_soft_pmds[i].flags, ovfl_pmds)); + DBprintk_ovfl(("soft_pmd[%d] overflowed flags=0x%x, ovfl=0x%lx\n", i, ctx->ctx_soft_pmds[i].flags, ovfl_pmds)); if (PMC_OVFL_NOTIFY(ctx, i)) { ovfl_notify |= 1UL << i; @@ -2575,7 +2740,8 @@ * No overflow requiring a user level notification */ if (ovfl_notify == 0UL) { - pfm_reset_regs(ctx, &ovfl_pmds, PFM_RELOAD_SHORT_RESET); + if (ovfl_pmds) + pfm_reset_regs(ctx, &ovfl_pmds, PFM_RELOAD_SHORT_RESET); return 0x0; } @@ -2650,7 +2816,7 @@ * necessarily go to the signal handler (if any) when it goes back to * user mode. */ - DBprintk(("[%d] sending notification to [%d]\n", + DBprintk_ovfl(("[%d] sending notification to [%d]\n", task->pid, ctx->ctx_notify_task->pid)); @@ -2683,7 +2849,7 @@ * before, changing it to NULL will still maintain this invariant. * Of course, when it is equal to current it cannot change at this point. */ - DBprintk(("block=%d notify [%d] current [%d]\n", + DBprintk_ovfl(("block=%d notify [%d] current [%d]\n", ctx->ctx_fl_block, ctx->ctx_notify_task ? 
ctx->ctx_notify_task->pid: -1, current->pid )); @@ -2694,7 +2860,7 @@ } else { lost_notify: /* XXX: more to do here, to convert to non-blocking (reset values) */ - DBprintk(("notification task has disappeared !\n")); + DBprintk_ovfl(("notification task has disappeared !\n")); /* * for a non-blocking context, we make sure we do not fall into the * pfm_overflow_notify() trap. Also in the case of a blocking context with lost @@ -2716,7 +2882,7 @@ */ ctx->ctx_fl_frozen = 1; - DBprintk(("reload pmc0=0x%x must_block=%ld\n", + DBprintk_ovfl(("return pmc0=0x%x must_block=%ld\n", ctx->ctx_fl_frozen ? 0x1 : 0x0, t->pfm_ovfl_block_reset)); return ctx->ctx_fl_frozen ? 0x1 : 0x0; @@ -2727,8 +2893,9 @@ { u64 pmc0; struct task_struct *task; + pfm_context_t *ctx; - pfm_ovfl_intr_count++; + pfm_stats.pfm_ovfl_intr_count++; /* * srlz.d done before arriving here @@ -2742,24 +2909,54 @@ * assumes : if any PM[0].bit[63-1] is set, then PMC[0].fr = 1 */ if ((pmc0 & ~0x1UL)!=0UL && (task=PMU_OWNER())!= NULL) { - /* - * assumes, PMC[0].fr = 1 at this point - * - * XXX: change protype to pass &pmc0 + * we assume that pmc0.fr is always set here */ - pmc0 = pfm_overflow_handler(task, pmc0, regs); + ctx = task->thread.pfm_context; - /* we never explicitely freeze PMU here */ - if (pmc0 == 0) { - ia64_set_pmc(0, 0); - ia64_srlz_d(); + /* sanity check */ + if (!ctx) { + printk("perfmon: Spurious overflow interrupt: process %d has no PFM context\n", + task->pid); + return; } +#ifdef CONFIG_SMP + /* + * Because an IPI has higher priority than the PMU overflow interrupt, it is + * possible that the handler be interrupted by a request from another CPU to fetch + * the PMU state of the currently active context. The task may have just been + * migrated to another CPU which is trying to restore the context. If there was + * a pending overflow interrupt when the task left this CPU, it is possible for + * the handler to get interrupt by the IPI. 
In which case, we fetch request + * MUST be postponed until the interrupt handler is done. The ctx_is_busy + * flag indicates such a condition. The other CPU must busy wait until it's cleared. + */ + atomic_set(&ctx->ctx_is_busy, 1); +#endif + + /* + * assume PMC[0].fr = 1 at this point + */ + pmc0 = pfm_overflow_handler(task, ctx, pmc0, regs); + + /* + * We always clear the overflow status bits and either unfreeze + * or keep the PMU frozen. + */ + ia64_set_pmc(0, pmc0); + ia64_srlz_d(); + +#ifdef CONFIG_SMP + /* + * announce that we are doing with the context + */ + atomic_set(&ctx->ctx_is_busy, 0); +#endif } else { - pfm_spurious_ovfl_intr_count++; + pfm_stats.pfm_spurious_ovfl_intr_count++; - DBprintk(("perfmon: Spurious PMU overflow interrupt on CPU%d: pmc0=0x%lx owner=%p\n", - smp_processor_id(), pmc0, (void *)PMU_OWNER())); + printk("perfmon: Spurious PMU overflow interrupt on CPU%d: pmc0=0x%lx owner=%p\n", + smp_processor_id(), pmc0, (void *)PMU_OWNER()); } } @@ -2767,33 +2964,31 @@ static int perfmon_proc_info(char *page) { -#ifdef CONFIG_SMP -#define cpu_is_online(i) (cpu_online_map & (1UL << i)) -#else -#define cpu_is_online(i) 1 -#endif char *p = page; - u64 pmc0 = ia64_get_pmc(0); int i; - p += sprintf(p, "perfmon enabled: %s\n", pmu_conf.pfm_is_disabled ? "No": "Yes"); - - p += sprintf(p, "monitors_pmcs0]=0x%lx\n", pmu_conf.monitor_pmcs[0]); - p += sprintf(p, "counter_pmcds[0]=0x%lx\n", pmu_conf.counter_pmds[0]); - p += sprintf(p, "overflow interrupts=%lu\n", pfm_ovfl_intr_count); - p += sprintf(p, "spurious overflow interrupts=%lu\n", pfm_spurious_ovfl_intr_count); - p += sprintf(p, "recorded samples=%lu\n", pfm_recorded_samples_count); - - p += sprintf(p, "CPU%d.pmc[0]=%lx\nPerfmon debug: %s\n", - smp_processor_id(), pmc0, pfm_debug_mode ? "On" : "Off"); + p += sprintf(p, "enabled : %s\n", pmu_conf.pfm_is_disabled ? "No": "Yes"); + p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? 
"Yes": "No"); + p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.perf_ovfl_val); + p += sprintf(p, "overflow intrs : %lu\n", pfm_stats.pfm_ovfl_intr_count); + p += sprintf(p, "spurious intrs : %lu\n", pfm_stats.pfm_spurious_ovfl_intr_count); + p += sprintf(p, "recorded samples : %lu\n", pfm_stats.pfm_recorded_samples_count); + p += sprintf(p, "smpl buffer full : %lu\n", pfm_stats.pfm_full_smpl_buffer_count); #ifdef CONFIG_SMP - p += sprintf(p, "CPU%d cpu_data.pfm_syst_wide=%d cpu_data.dcr_pp=%d\n", - smp_processor_id(), local_cpu_data->pfm_syst_wide, local_cpu_data->pfm_dcr_pp); + p += sprintf(p, "CPU%d syst_wide : %d\n" + "CPU%d dcr_pp : %d\n", + smp_processor_id(), + this_cpu(pfm_syst_wide), + smp_processor_id(), + this_cpu(pfm_dcr_pp)); #endif LOCK_PFS(); - p += sprintf(p, "proc_sessions=%lu\nsys_sessions=%lu\nsys_use_dbregs=%lu\nptrace_use_dbregs=%lu\n", + p += sprintf(p, "proc_sessions : %lu\n" + "sys_sessions : %lu\n" + "sys_use_dbregs : %lu\n" + "ptrace_use_dbregs: %lu\n", pfm_sessions.pfs_task_sessions, pfm_sessions.pfs_sys_sessions, pfm_sessions.pfs_sys_use_dbregs, @@ -2803,12 +2998,28 @@ for(i=0; i < NR_CPUS; i++) { if (cpu_is_online(i)) { - p += sprintf(p, "CPU%d.pmu_owner: %-6d\n", + p += sprintf(p, "CPU%d owner : %-6d\n", i, pmu_owners[i].owner ? pmu_owners[i].owner->pid: -1); } } + for(i=0; pmd_desc[i].type != PFM_REG_NONE; i++) { + p += sprintf(p, "PMD%-2d: %d 0x%lx 0x%lx\n", + i, + pmd_desc[i].type, + pmd_desc[i].dep_pmd[0], + pmd_desc[i].dep_pmc[0]); + } + + for(i=0; pmc_desc[i].type != PFM_REG_NONE; i++) { + p += sprintf(p, "PMC%-2d: %d 0x%lx 0x%lx\n", + i, + pmc_desc[i].type, + pmc_desc[i].dep_pmd[0], + pmc_desc[i].dep_pmc[0]); + } + return p - page; } @@ -2840,7 +3051,7 @@ /* * propagate the value of the dcr_pp bit to the psr */ - ia64_psr(regs)->pp = mode ? local_cpu_data->pfm_dcr_pp : 0; + ia64_psr(regs)->pp = mode ? 
this_cpu(pfm_dcr_pp) : 0; } #endif @@ -2867,6 +3078,7 @@ * It will be restored from ipsr when going back to user level */ __asm__ __volatile__ ("rum psr.up;;"::: "memory"); + ia64_srlz_i(); ctx->ctx_saved_psr = psr; @@ -2922,13 +3134,9 @@ for (i=0; mask; i++, mask>>=1) { if (mask & 0x1) t->pmd[i] =ia64_get_pmd(i); } - /* - * XXX: simplify to pmc0 only - */ - mask = ctx->ctx_saved_pmcs[0]; - for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) t->pmc[i] = ia64_get_pmc(i); - } + + /* save pmc0 */ + t->pmc[0] = ia64_get_pmc(0); /* not owned by this CPU */ atomic_set(&ctx->ctx_last_cpu, -1); @@ -2966,6 +3174,12 @@ PMU_OWNER() ? PMU_OWNER()->pid: -1, atomic_read(&ctx->ctx_saving_in_progress))); + /* must wait until not busy before retrying whole request */ + if (atomic_read(&ctx->ctx_is_busy)) { + arg->retval = 2; + return; + } + /* must wait if saving was interrupted */ if (atomic_read(&ctx->ctx_saving_in_progress)) { arg->retval = 1; @@ -2978,9 +3192,9 @@ return; } - DBprintk(("saving state for [%d] save_pmcs=0x%lx all_pmcs=0x%lx used_pmds=0x%lx\n", + DBprintk(("saving state for [%d] used_pmcs=0x%lx reload_pmcs=0x%lx used_pmds=0x%lx\n", arg->task->pid, - ctx->ctx_saved_pmcs[0], + ctx->ctx_used_pmcs[0], ctx->ctx_reload_pmcs[0], ctx->ctx_used_pmds[0])); @@ -2993,17 +3207,15 @@ /* * XXX needs further optimization. 
- * Also must take holes into account */ mask = ctx->ctx_used_pmds[0]; for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) t->pmd[i] =ia64_get_pmd(i); - } - - mask = ctx->ctx_saved_pmcs[0]; - for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) t->pmc[i] = ia64_get_pmc(i); + if (mask & 0x1) t->pmd[i] = ia64_get_pmd(i); } + + /* save pmc0 */ + t->pmc[0] = ia64_get_pmc(0); + /* not owned by this CPU */ atomic_set(&ctx->ctx_last_cpu, -1); @@ -3032,11 +3244,17 @@ arg.task = task; arg.retval = -1; + if (atomic_read(&ctx->ctx_is_busy)) { +must_wait_busy: + while (atomic_read(&ctx->ctx_is_busy)); + } + if (atomic_read(&ctx->ctx_saving_in_progress)) { DBprintk(("no IPI, must wait for [%d] to be saved on [%d]\n", task->pid, cpu)); - +must_wait_saving: /* busy wait */ while (atomic_read(&ctx->ctx_saving_in_progress)); + DBprintk(("done saving for [%d] on [%d]\n", task->pid, cpu)); return; } DBprintk(("calling CPU %d from CPU %d\n", cpu, smp_processor_id())); @@ -3056,11 +3274,8 @@ * This is the case, where we interrupted the saving which started just at the time we sent the * IPI. */ - if (arg.retval == 1) { - DBprintk(("must wait for [%d] to be saved on [%d]\n", task->pid, cpu)); - while (atomic_read(&ctx->ctx_saving_in_progress)); - DBprintk(("done saving for [%d] on [%d]\n", task->pid, cpu)); - } + if (arg.retval == 1) goto must_wait_saving; + if (arg.retval == 2) goto must_wait_busy; } #endif /* CONFIG_SMP */ @@ -3114,55 +3329,53 @@ pfm_fetch_regs(cpu, task, ctx); } #endif - t = &task->thread; + t = &task->thread; /* - * XXX: will be replaced by assembly routine - * We clear all unused PMDs to avoid leaking information + * To avoid leaking information to the user level when psr.sp=0, + * we must reload ALL implemented pmds (even the ones we don't use). + * In the kernel we only allow PFM_READ_PMDS on registers which + * we initialized or requested (sampling) so there is no risk there. 
+ * + * As an optimization, we will only reload the PMD that we use when + * the context is in protected mode, i.e. psr.sp=1 because then there + * is no leak possible. */ - mask = ctx->ctx_used_pmds[0]; + mask = pfm_sysctl.fastctxsw || ctx->ctx_fl_protected ? ctx->ctx_used_pmds[0] : ctx->ctx_reload_pmds[0]; for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) - ia64_set_pmd(i, t->pmd[i]); - else - ia64_set_pmd(i, 0UL); + if (mask & 0x1) ia64_set_pmd(i, t->pmd[i] & pmu_conf.perf_ovfl_val); } - /* XXX: will need to clear all unused pmd, for security */ /* - * skip pmc[0] to avoid side-effects, - * all PMCs are systematically reloaded, unsued get default value - * to avoid picking up stale configuration + * PMC0 is never set in the mask because it is always restored + * separately. + * + * ALL PMCs are systematically reloaded, unused registers + * get their default (PAL reset) values to avoid picking up + * stale configuration. */ - mask = ctx->ctx_reload_pmcs[0]>>1; - for (i=1; mask; i++, mask>>=1) { + mask = ctx->ctx_reload_pmcs[0]; + for (i=0; mask; i++, mask>>=1) { if (mask & 0x1) ia64_set_pmc(i, t->pmc[i]); } /* - * restore debug registers when used for range restrictions. - * We must restore the unused registers to avoid picking up - * stale information. + * we restore ALL the debug registers to avoid picking up + * stale state. 
*/ - mask = ctx->ctx_used_ibrs[0]; - for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) + if (ctx->ctx_fl_using_dbreg) { + for (i=0; i < pmu_conf.num_ibrs; i++) { ia64_set_ibr(i, t->ibr[i]); - else - ia64_set_ibr(i, 0UL); - } - - mask = ctx->ctx_used_dbrs[0]; - for (i=0; mask; i++, mask>>=1) { - if (mask & 0x1) + } + ia64_srlz_i(); + for (i=0; i < pmu_conf.num_dbrs; i++) { ia64_set_dbr(i, t->dbr[i]); - else - ia64_set_dbr(i, 0UL); + } } + ia64_srlz_d(); if (t->pmc[0] & ~0x1) { - ia64_srlz_d(); - pfm_overflow_handler(task, t->pmc[0], NULL); + pfm_overflow_handler(task, ctx, t->pmc[0], NULL); } /* @@ -3215,7 +3428,7 @@ * When restoring context, we must restore ALL pmcs, even the ones * that the task does not use to avoid leaks and possibly corruption * of the sesion because of configuration conflicts. So here, we - * initializaed the table used in the context switch restore routine. + * initialize the entire set used in the context switch restore routine. */ t->pmc[i] = reset_pmcs[i]; DBprintk((" pmc[%d]=0x%lx\n", i, reset_pmcs[i])); @@ -3224,39 +3437,61 @@ } /* * clear reset values for PMD. - * XX: good up to 64 PMDS. Suppose that zero is a valid value. + * XXX: good up to 64 PMDS. Suppose that zero is a valid value. */ mask = pmu_conf.impl_regs[4]; for(i=0; mask; mask>>=1, i++) { if (mask & 0x1) ia64_set_pmd(i, 0UL); + t->pmd[i] = 0UL; } /* - * On context switched restore, we must restore ALL pmc even + * On context switched restore, we must restore ALL pmc and ALL pmd even * when they are not actively used by the task. In UP, the incoming process - * may otherwise pick up left over PMC state from the previous process. + * may otherwise pick up left over PMC, PMD state from the previous process. * As opposed to PMD, stale PMC can cause harm to the incoming * process because they may change what is being measured. * Therefore, we must systematically reinstall the entire * PMC state. In SMP, the same thing is possible on the - * same CPU but also on between 2 CPUs. 
+ * same CPU but also on between 2 CPUs. + * + * The problem with PMD is information leaking especially + * to user level when psr.sp=0 * * There is unfortunately no easy way to avoid this problem - * on either UP or SMP. This definitively slows down the - * pfm_load_regs(). + * on either UP or SMP. This definitively slows down the + * pfm_load_regs() function. */ /* * We must include all the PMC in this mask to make sure we don't - * see any side effect of the stale state, such as opcode matching + * see any side effect of a stale state, such as opcode matching * or range restrictions, for instance. + * + * We never directly restore PMC0 so we do not include it in the mask. */ - ctx->ctx_reload_pmcs[0] = pmu_conf.impl_regs[0]; + ctx->ctx_reload_pmcs[0] = pmu_conf.impl_regs[0] & ~0x1; + /* + * We must include all the PMD in this mask to avoid picking + * up stale value and leak information, especially directly + * at the user level when psr.sp=0 + */ + ctx->ctx_reload_pmds[0] = pmu_conf.impl_regs[4]; + + /* + * Keep track of the pmds we want to sample + * XXX: may be we don't need to save/restore the DEAR/IEAR pmds + * but we do need the BTB for sure. This is because of a hardware + * buffer of 1 only for non-BTB pmds. 
+ * + * We ignore the unimplemented pmds specified by the user + */ + ctx->ctx_used_pmds[0] = ctx->ctx_smpl_regs[0] & pmu_conf.impl_regs[4]; + ctx->ctx_used_pmcs[0] = 1; /* always save/restore PMC[0] */ /* * useful in case of re-enable after disable */ - ctx->ctx_used_pmds[0] = 0UL; ctx->ctx_used_ibrs[0] = 0UL; ctx->ctx_used_dbrs[0] = 0UL; @@ -3278,7 +3513,7 @@ { pfm_context_t *ctx; u64 pmc0; - unsigned long mask, mask2, val; + unsigned long mask2, val; int i; ctx = task->thread.pfm_context; @@ -3300,22 +3535,28 @@ */ if (ctx->ctx_fl_system) { - __asm__ __volatile__ ("rsm psr.pp;;"::: "memory"); /* disable dcr pp */ ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP); + /* stop monitoring */ + __asm__ __volatile__ ("rsm psr.pp;;"::: "memory"); + + ia64_srlz_i(); + #ifdef CONFIG_SMP - local_cpu_data->pfm_syst_wide = 0; - local_cpu_data->pfm_dcr_pp = 0; + this_cpu(pfm_syst_wide) = 0; + this_cpu(pfm_dcr_pp) = 0; #else pfm_tasklist_toggle_pp(0); #endif - } else { + /* stop monitoring */ __asm__ __volatile__ ("rum psr.up;;"::: "memory"); + ia64_srlz_i(); + /* no more save/restore on ctxsw */ current->thread.flags &= ~IA64_THREAD_PM_VALID; } @@ -3349,7 +3590,7 @@ ia64_srlz_d(); /* - * We don't need to restore psr, because we are on our way out anyway + * We don't need to restore psr, because we are on our way out */ /* @@ -3365,10 +3606,12 @@ if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) printk("perfmon: [%d] last_cpu=%d\n", task->pid, atomic_read(&ctx->ctx_last_cpu)); - mask = pmc0 >> PMU_FIRST_COUNTER; - mask2 = ctx->ctx_used_pmds[0] >> PMU_FIRST_COUNTER; - - for (i = PMU_FIRST_COUNTER; mask2; i++, mask>>=1, mask2>>=1) { + /* + * we save all the used pmds + * we take care of overflows for pmds used as counters + */ + mask2 = ctx->ctx_used_pmds[0]; + for (i = 0; mask2; i++, mask2>>=1) { /* skip non used pmds */ if ((mask2 & 0x1) == 0) continue; @@ -3376,7 +3619,6 @@ val = ia64_get_pmd(i); if (PMD_IS_COUNTING(i)) { - DBprintk(("[%d] pmd[%d] soft_pmd=0x%lx 
hw_pmd=0x%lx\n", task->pid, i, ctx->ctx_soft_pmds[i].val, val & pmu_conf.perf_ovfl_val)); /* collect latest results */ @@ -3389,15 +3631,19 @@ */ task->thread.pmd[i] = 0; - /* take care of overflow inline */ - if (mask & 0x1) { + /* + * take care of overflow inline + */ + if (pmc0 & (1UL << i)) { ctx->ctx_soft_pmds[i].val += 1 + pmu_conf.perf_ovfl_val; DBprintk(("[%d] pmd[%d] overflowed soft_pmd=0x%lx\n", task->pid, i, ctx->ctx_soft_pmds[i].val)); } } else { DBprintk(("[%d] pmd[%d] hw_pmd=0x%lx\n", task->pid, i, val)); - /* not a counter, just save value as is */ + /* + * not a counter, just save value as is + */ task->thread.pmd[i] = val; } } @@ -3409,38 +3655,78 @@ } - /* * task is the newly created task, pt_regs for new child */ int pfm_inherit(struct task_struct *task, struct pt_regs *regs) { - pfm_context_t *ctx = current->thread.pfm_context; + pfm_context_t *ctx; pfm_context_t *nctx; - struct thread_struct *th = &task->thread; + struct thread_struct *thread; unsigned long m; int i; /* + * the new task was copied from parent and therefore points + * to the parent's context at this point + */ + ctx = task->thread.pfm_context; + thread = &task->thread; + + /* * make sure child cannot mess up the monitoring session */ ia64_psr(regs)->sp = 1; DBprintk(("enabling psr.sp for [%d]\n", task->pid)); - /* - * remove any sampling buffer mapping from child user - * address space. Must be done for all cases of inheritance. - */ - if (ctx->ctx_smpl_vaddr) pfm_remove_smpl_mapping(task); + + /* + * if there was a virtual mapping for the sampling buffer + * the mapping is NOT inherited across fork() (see VM_DONTCOPY), + * so we don't have to explicitely remove it here. + * + * + * Part of the clearing of fields is also done in + * copy_thread() because the fiels are outside the + * pfm_context structure and can affect tasks not + * using perfmon. 
+ */ + + /* clear pending notification */ + task->thread.pfm_ovfl_block_reset = 0; + + /* + * clear cpu pinning restriction for child + */ + if (ctx->ctx_fl_system) { + set_cpus_allowed(task, ctx->ctx_saved_cpus_allowed); + + DBprintk(("setting cpus_allowed for [%d] to 0x%lx from 0x%lx\n", + task->pid, + ctx->ctx_saved_cpus_allowed, + current->cpus_allowed)); + } /* * takes care of easiest case first */ if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_NONE) { + DBprintk(("removing PFM context for [%d]\n", task->pid)); - task->thread.pfm_context = NULL; - task->thread.pfm_ovfl_block_reset = 0; + + task->thread.pfm_context = NULL; + + /* + * we must clear psr.up because the new child does + * not have a context and the PM_VALID flag is cleared + * in copy_thread(). + * + * we do not clear psr.pp because it is always + * controlled by the system wide logic and we should + * never be here when system wide is running anyway + */ + ia64_psr(regs)->up = 0; /* copy_thread() clears IA64_THREAD_PM_VALID */ return 0; @@ -3454,69 +3740,82 @@ if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_ONCE) { nctx->ctx_fl_inherit = PFM_FL_INHERIT_NONE; - atomic_set(&nctx->ctx_last_cpu, -1); - - /* - * task is not yet visible in the tasklist, so we do - * not need to lock the newly created context. - * However, we must grab the tasklist_lock to ensure - * that the ctx_owner or ctx_notify_task do not disappear - * while we increment their check counters. - */ - read_lock(&tasklist_lock); + DBprintk(("downgrading to INHERIT_NONE for [%d]\n", task->pid)); + } + /* + * task is not yet visible in the tasklist, so we do + * not need to lock the newly created context. + * However, we must grab the tasklist_lock to ensure + * that the ctx_owner or ctx_notify_task do not disappear + * while we increment their check counters. 
+ */ + read_lock(&tasklist_lock); - if (nctx->ctx_notify_task) - atomic_inc(&nctx->ctx_notify_task->thread.pfm_notifiers_check); + if (nctx->ctx_notify_task) + atomic_inc(&nctx->ctx_notify_task->thread.pfm_notifiers_check); - if (nctx->ctx_owner) - atomic_inc(&nctx->ctx_owner->thread.pfm_owners_check); + if (nctx->ctx_owner) + atomic_inc(&nctx->ctx_owner->thread.pfm_owners_check); - read_unlock(&tasklist_lock); + read_unlock(&tasklist_lock); - DBprintk(("downgrading to INHERIT_NONE for [%d]\n", task->pid)); - LOCK_PFS(); - pfm_sessions.pfs_task_sessions++; - UNLOCK_PFS(); - } + LOCK_PFS(); + pfm_sessions.pfs_task_sessions++; + UNLOCK_PFS(); /* initialize counters in new context */ - m = pmu_conf.counter_pmds[0] >> PMU_FIRST_COUNTER; + m = nctx->ctx_used_pmds[0] >> PMU_FIRST_COUNTER; for(i = PMU_FIRST_COUNTER ; m ; m>>=1, i++) { - if (m & 0x1) { + if ((m & 0x1) && pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING) { nctx->ctx_soft_pmds[i].val = nctx->ctx_soft_pmds[i].ival & ~pmu_conf.perf_ovfl_val; - th->pmd[i] = nctx->ctx_soft_pmds[i].ival & pmu_conf.perf_ovfl_val; + thread->pmd[i] = nctx->ctx_soft_pmds[i].ival & pmu_conf.perf_ovfl_val; } + /* what about the other pmds? zero or keep as is */ } - /* clear BTB index register */ - th->pmd[16] = 0; + /* + * clear BTB index register + * XXX: CPU-model specific knowledge! + */ + thread->pmd[16] = 0; - /* if sampling then increment number of users of buffer */ - if (nctx->ctx_psb) { - /* - * XXX: nopt very pretty! - */ + nctx->ctx_fl_frozen = 0; + nctx->ctx_ovfl_regs[0] = 0UL; + atomic_set(&nctx->ctx_last_cpu, -1); + + /* + * here nctx->ctx_psb == ctx->ctx_psb + * + * increment reference count to sampling + * buffer, if any. Note that this is independent + * from the virtual mapping. 
The latter is never + * inherited while the former will be if context + * is setup to something different from PFM_FL_INHERIT_NONE + */ + if (nctx->ctx_psb) { LOCK_PSB(nctx->ctx_psb); + nctx->ctx_psb->psb_refcnt++; + + DBprintk(("updated smpl @ %p refcnt=%lu psb_flags=0x%x\n", + ctx->ctx_psb->psb_hdr, + ctx->ctx_psb->psb_refcnt, + ctx->ctx_psb->psb_flags)); + UNLOCK_PSB(nctx->ctx_psb); + /* * remove any pointer to sampling buffer mapping */ nctx->ctx_smpl_vaddr = 0; } - nctx->ctx_fl_frozen = 0; - nctx->ctx_ovfl_regs[0] = 0UL; - sema_init(&nctx->ctx_restart_sem, 0); /* reset this semaphore to locked */ - /* clear pending notification */ - th->pfm_ovfl_block_reset = 0; - /* link with new task */ - th->pfm_context = nctx; + thread->pfm_context = nctx; DBprintk(("nctx=%p for process [%d]\n", (void *)nctx, task->pid)); @@ -3526,7 +3825,7 @@ */ if (current->thread.flags & IA64_THREAD_PM_VALID) { DBprintk(("setting PM_VALID for [%d]\n", task->pid)); - th->flags |= IA64_THREAD_PM_VALID; + thread->flags |= IA64_THREAD_PM_VALID; } return 0; @@ -3555,9 +3854,9 @@ LOCK_PSB(psb); - DBprintk(("sampling buffer from [%d] @%p size %ld vma_flag=0x%x\n", + DBprintk(("sampling buffer from [%d] @%p size %ld refcnt=%lu psb_flags=0x%x\n", task->pid, - psb->psb_hdr, psb->psb_size, psb->psb_flags)); + psb->psb_hdr, psb->psb_size, psb->psb_refcnt, psb->psb_flags)); /* * in the case where we are the last user, we may be able to free @@ -3580,7 +3879,7 @@ * * See pfm_vm_close() and pfm_cleanup_smpl_buf() for more details. */ - if ((psb->psb_flags & PFM_PSB_VMA) == 0) { + if ((psb->psb_flags & PSB_HAS_VMA) == 0) { DBprintk(("cleaning sampling buffer from [%d] @%p size %ld\n", task->pid, @@ -3612,7 +3911,7 @@ * direct pointer to a task structure thereby bypassing the tasklist. * We must make sure that, if we have task!= NULL, the target task is still * present and is identical to the initial task specified - * during pfm_create_context(). 
It may already be detached from the tasklist but + * during pfm_context_create(). It may already be detached from the tasklist but * that's okay. Note that it is okay if we miss the deadline and the task scans * the list for nothing, it will affect performance but not correctness. * The correctness is ensured by using the ctx_lock which prevents the @@ -3761,6 +4060,8 @@ } } read_unlock(&tasklist_lock); + + atomic_set(&task->thread.pfm_owners_check, 0); } @@ -3818,6 +4119,8 @@ } } read_unlock(&tasklist_lock); + + atomic_set(&task->thread.pfm_notifiers_check, 0); } static struct irqaction perfmon_irqaction = { @@ -3836,6 +4139,12 @@ if (i >= pmu_conf.num_pmcs) break; if (PMC_IS_IMPL(i)) reset_pmcs[i] = ia64_get_pmc(i); } +#ifdef CONFIG_MCKINLEY + /* + * set the 'stupid' enable bit to power the PMU! + */ + reset_pmcs[4] |= 1UL << 23; +#endif } /* @@ -3903,23 +4212,12 @@ */ pfm_pmu_snapshot(); - /* - * list the pmc registers used to control monitors - * XXX: unfortunately this information is not provided by PAL - * - * We start with the architected minimum and then refine for each CPU model - */ - pmu_conf.monitor_pmcs[0] = PMM(4)|PMM(5)|PMM(6)|PMM(7); - /* - * architected counters + * setup the register configuration descriptions for the CPU */ - pmu_conf.counter_pmds[0] |= PMM(4)|PMM(5)|PMM(6)|PMM(7); + pmu_conf.pmc_desc = pmc_desc; + pmu_conf.pmd_desc = pmd_desc; -#ifdef CONFIG_ITANIUM - pmu_conf.monitor_pmcs[0] |= PMM(10)|PMM(11)|PMM(12); - /* Itanium does not add more counters */ -#endif /* we are all set */ pmu_conf.pfm_is_disabled = 0; @@ -3928,6 +4226,8 @@ */ perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL); + pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0); + spin_lock_init(&pfm_sessions.pfs_lock); return 0; @@ -3941,7 +4241,6 @@ ia64_set_pmv(IA64_PERFMON_VECTOR); ia64_srlz_d(); } - #else /* !CONFIG_PERFMON */ diff -Nru a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c --- a/arch/ia64/kernel/process.c Thu 
May 30 21:28:59 2002 +++ b/arch/ia64/kernel/process.c Thu May 30 21:28:59 2002 @@ -194,13 +194,15 @@ pfm_save_regs(task); # ifdef CONFIG_SMP - if (local_cpu_data->pfm_syst_wide) + if (this_cpu(pfm_syst_wide)) pfm_syst_wide_update_task(task, 0); # endif #endif +#ifdef CONFIG_IA32_SUPPORT if (IS_IA32_PROCESS(ia64_task_regs(task))) ia32_save_state(task); +#endif } void @@ -214,12 +216,14 @@ pfm_load_regs(task); # ifdef CONFIG_SMP - if (local_cpu_data->pfm_syst_wide) pfm_syst_wide_update_task(task, 1); + if (this_cpu(pfm_syst_wide)) pfm_syst_wide_update_task(task, 1); # endif #endif +#ifdef CONFIG_IA32_SUPPORT if (IS_IA32_PROCESS(ia64_task_regs(task))) ia32_load_state(task); +#endif } /* @@ -357,6 +361,8 @@ */ atomic_set(&p->thread.pfm_notifiers_check, 0); atomic_set(&p->thread.pfm_owners_check, 0); + /* clear list of sampling buffer to free for new task */ + p->thread.pfm_smpl_buf_list = NULL; if (current->thread.pfm_context) retval = pfm_inherit(p, child_ptregs); #endif @@ -566,9 +572,8 @@ pfm_flush_regs(current); /* free debug register resources */ - if ((current->thread.flags & IA64_THREAD_DBG_VALID) != 0) { + if (current->thread.flags & IA64_THREAD_DBG_VALID) pfm_release_debug_registers(current); - } #endif } diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c --- a/arch/ia64/kernel/setup.c Thu May 30 21:28:58 2002 +++ b/arch/ia64/kernel/setup.c Thu May 30 21:28:58 2002 @@ -395,7 +395,7 @@ switch (c->family) { case 0x07: memcpy(family, "Itanium", 8); break; - case 0x1f: memcpy(family, "Itanium 2", 9); break; + case 0x1f: memcpy(family, "Itanium 2", 10); break; default: sprintf(family, "%u", c->family); break; } @@ -542,7 +542,18 @@ extern char __per_cpu_end[]; int cpu = smp_processor_id(); - my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start); + if (__per_cpu_end - __per_cpu_start > PAGE_SIZE) + panic("Per-cpu data area too big! 
(%Zu > %Zu)", + __per_cpu_end - __per_cpu_start, PAGE_SIZE); + + /* + * On the BSP, the page allocator isn't initialized by the time we get here. On + * the APs, the bootmem allocator is no longer available... + */ + if (cpu == 0) + my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start); + else + my_cpu_data = (void *) get_free_page(GFP_KERNEL); memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); __per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start; my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start); @@ -559,6 +570,20 @@ */ identify_cpu(my_cpu_info); +#ifdef CONFIG_MCKINLEY + { +#define FEATURE_SET 16 + struct ia64_pal_retval iprv; + + if (my_cpu_info->family == 0x1f) { + PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); + if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) + PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, + (iprv.v1 | 0x80), FEATURE_SET, 0); + } + } +#endif + /* Clear the stack memory reserved for pt_regs: */ memset(ia64_task_regs(current), 0, sizeof(struct pt_regs)); @@ -570,7 +595,7 @@ * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). 
*/ - ia64_set_dcr( IA64_DCR_DM | IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR + ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC); #ifndef CONFIG_SMP ia64_set_fpu_owner(0); diff -Nru a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c --- a/arch/ia64/kernel/signal.c Thu May 30 21:28:58 2002 +++ b/arch/ia64/kernel/signal.c Thu May 30 21:28:58 2002 @@ -143,9 +143,10 @@ { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; - if (from->si_code < 0) - return __copy_to_user(to, from, sizeof(siginfo_t)); - else { + if (from->si_code < 0) { + if (__copy_to_user(to, from, sizeof(siginfo_t))) + return -EFAULT; + } else { int err; /* diff -Nru a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c --- a/arch/ia64/kernel/smpboot.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/kernel/smpboot.c Thu May 30 21:28:59 2002 @@ -428,7 +428,7 @@ task_for_booting_cpu = idle; - Dprintk("Sending wakeup vector %u to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); + Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); @@ -543,7 +543,7 @@ printk("Before bogomips.\n"); if (!cpucount) { - printk(KERN_ERR "Error: only one processor found.\n"); + printk(KERN_WARNING "Warning: only one processor found.\n"); } else { unsigned long bogosum = 0; for (cpu = 0; cpu < NR_CPUS; cpu++) diff -Nru a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c --- a/arch/ia64/lib/swiotlb.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/lib/swiotlb.c Thu May 30 21:28:59 2002 @@ -478,6 +478,17 @@ return SG_ENT_PHYS_ADDRESS(sg); } +/* + * Return whether the given PCI device DMA address mask can be supported properly. For + * example, if your device can only drive the low 24-bits during PCI bus mastering, then + * you would pass 0x00ffffff as the mask to this function. 
+ */ +int +swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask) +{ + return 1; +} + EXPORT_SYMBOL(swiotlb_init); EXPORT_SYMBOL(swiotlb_map_single); EXPORT_SYMBOL(swiotlb_unmap_single); @@ -488,3 +499,4 @@ EXPORT_SYMBOL(swiotlb_dma_address); EXPORT_SYMBOL(swiotlb_alloc_consistent); EXPORT_SYMBOL(swiotlb_free_consistent); +EXPORT_SYMBOL(swiotlb_pci_dma_supported); diff -Nru a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c --- a/arch/ia64/mm/init.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/mm/init.c Thu May 30 21:28:59 2002 @@ -206,7 +206,6 @@ printk("%d pages shared\n", shared); printk("%d pages swap cached\n", cached); printk("%ld pages in page table cache\n", pgtable_cache_size); - printk("%ld buffermem pages\n", nr_buffermem_pages()); #endif /* !CONFIG_DISCONTIGMEM */ } @@ -251,7 +250,7 @@ void __init ia64_mmu_init (void *my_cpu_data) { - unsigned long flags, rid, pta, impl_va_bits; + unsigned long psr, rid, pta, impl_va_bits; extern void __init tlb_init (void); #ifdef CONFIG_DISABLE_VHPT # define VHPT_ENABLE_BIT 0 @@ -263,7 +262,7 @@ * Set up the kernel identity mapping for regions 6 and 5. The mapping for region * 7 is setup up in _start(). 
*/ - ia64_clear_ic(flags); + psr = ia64_clear_ic(); rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET); ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2)); @@ -277,7 +276,7 @@ ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR, pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), PAGE_SHIFT); - __restore_flags(flags); + ia64_set_psr(psr); ia64_srlz_i(); /* diff -Nru a/arch/ia64/sn/io/Makefile b/arch/ia64/sn/io/Makefile --- a/arch/ia64/sn/io/Makefile Thu May 30 21:28:59 2002 +++ b/arch/ia64/sn/io/Makefile Thu May 30 21:28:59 2002 @@ -18,7 +18,7 @@ O_TARGET := sgiio.o ifeq ($(CONFIG_MODULES),y) -export-objs = pciio.o hcl.o +export-objs = pciio.o hcl.o pci_dma.o endif obj-y := stubs.o sgi_if.o pciio.o xtalk.o xbow.o xswitch.o klgraph_hack.o \ diff -Nru a/arch/ia64/sn/io/pci_dma.c b/arch/ia64/sn/io/pci_dma.c --- a/arch/ia64/sn/io/pci_dma.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/sn/io/pci_dma.c Thu May 30 21:28:59 2002 @@ -4,6 +4,9 @@ * for more details. * * Copyright (C) 2000,2002 Silicon Graphics, Inc. All rights reserved. + * + * Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for + * a description of how these routines should be used. */ #include @@ -12,6 +15,7 @@ #include #include #include +#include #include #include @@ -46,7 +50,7 @@ /* * Darn, we need to get the maps allocated for this bus. 
*/ - for (i=0; ilength, - PCIIO_BYTE_STREAM | PCIIO_DMA_DATA); -#else - dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length, PCIIO_DMA_DATA); -#endif + dma_map = 0; + dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length, + DMA_DATA_FLAGS); if (!dma_map) { - printk("pci_map_sg: Unable to allocate anymore 32Bits Page Map entries.\n"); + printk(KERN_ERR "sn_pci_map_sg: Unable to allocate " + "anymore 32 bit page map entries.\n"); BUG(); } - dma_addr = (dma_addr_t)pciio_dmamap_addr(dma_map, temp_ptr, sg->length); - /* printk("pci_map_sg: dma_map 0x%p Phys Addr 0x%p dma_addr 0x%p\n", dma_map, temp_ptr, dma_addr); */ + dma_addr = pciio_dmamap_addr(dma_map, phys_addr, sg->length); sg->address = (char *)dma_addr; sg->page = (char *)dma_map; @@ -372,7 +372,17 @@ } -/* +/** + * sn_pci_map_single - map a single region for DMA + * @hwdev: device to map for + * @ptr: kernel virtual address of the region to map + * @size: size of the region + * @direction: DMA direction + * + * Map the region pointed to by @ptr for DMA and return the + * DMA address. Also known as platform_pci_map_single() by + * the IA64 machvec code. 
+ * * We map this to the one step pciio_dmamap_trans interface rather than * the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have * no way of saving the dmamap handle from the alloc to later free diff -Nru a/arch/ia64/sn/kernel/misctest.c b/arch/ia64/sn/kernel/misctest.c --- a/arch/ia64/sn/kernel/misctest.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/sn/kernel/misctest.c Thu May 30 21:28:59 2002 @@ -75,7 +75,7 @@ if (mcatest == 5) { int zzzspec(long); int i; - long flags, dcr, res, val, addr=0xff00000000UL; + long psr, dcr, res, val, addr=0xff00000000UL; dcr = ia64_get_dcr(); for (i=0; i<5; i++) { @@ -87,11 +87,11 @@ ia64_set_dcr(dcr); res = ia64_sn_probe_io_slot(0xff00000000UL, 8, &val); printk("zzzspec: probe %ld, 0x%lx\n", res, val); - ia64_clear_ic(flags); + psr = ia64_clear_ic(); ia64_itc(0x2, 0xe00000ff00000000UL, pte_val(pfn_pte(0xff00000000UL >> PAGE_SHIFT, __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), _PAGE_SIZE_256M); - local_irq_restore(flags); + ia64_set_psr(psr); ia64_srlz_i (); } diff -Nru a/arch/ia64/tools/print_offsets.awk b/arch/ia64/tools/print_offsets.awk --- a/arch/ia64/tools/print_offsets.awk Thu May 30 21:28:59 2002 +++ b/arch/ia64/tools/print_offsets.awk Thu May 30 21:28:59 2002 @@ -7,6 +7,8 @@ print " * This file was generated by arch/ia64/tools/print_offsets.awk." 
print " *" print " */" + print "" + print "#define CLONE_IDLETASK_BIT 12" } # look for .tab: diff -Nru a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c --- a/arch/ia64/tools/print_offsets.c Thu May 30 21:28:59 2002 +++ b/arch/ia64/tools/print_offsets.c Thu May 30 21:28:59 2002 @@ -53,6 +53,7 @@ { "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) }, { "", 0 }, /* spacer */ { "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) }, + { "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) }, { "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) }, { "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) }, { "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) }, @@ -161,6 +162,7 @@ { "IA64_SIGFRAME_ARG2_OFFSET", offsetof (struct sigframe, arg2) }, { "IA64_SIGFRAME_HANDLER_OFFSET", offsetof (struct sigframe, handler) }, { "IA64_SIGFRAME_SIGCONTEXT_OFFSET", offsetof (struct sigframe, sc) }, + /* for assembly files which can't include sched.h: */ { "IA64_CLONE_VFORK", CLONE_VFORK }, { "IA64_CLONE_VM", CLONE_VM }, }; @@ -198,6 +200,8 @@ tab[i].name, space, tab[i].value, tab[i].value); } } + + printf ("\n#define CLONE_IDLETASK_BIT %ld\n", ia64_fls (CLONE_IDLETASK)); printf ("\n#endif /* _ASM_IA64_OFFSETS_H */\n"); return 0; diff -Nru a/arch/ia64/vmlinux.lds.S b/arch/ia64/vmlinux.lds.S --- a/arch/ia64/vmlinux.lds.S Thu May 30 21:28:59 2002 +++ b/arch/ia64/vmlinux.lds.S Thu May 30 21:28:59 2002 @@ -41,7 +41,8 @@ /* Read-only data */ - __gp = ALIGN(16) + 0x200000; /* gp must be 16-byte aligned for exc. table */ + . = ALIGN(16); + __gp = . + 0x200000; /* gp must be 16-byte aligned for exc. 
table */ /* Global data */ _data = .; diff -Nru a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c --- a/arch/parisc/kernel/traps.c Thu May 30 21:28:59 2002 +++ b/arch/parisc/kernel/traps.c Thu May 30 21:28:59 2002 @@ -43,7 +43,6 @@ static inline void console_verbose(void) { - extern int console_loglevel; console_loglevel = 15; } diff -Nru a/drivers/acpi/acpi_bus.c b/drivers/acpi/acpi_bus.c --- a/drivers/acpi/acpi_bus.c Thu May 30 21:28:59 2002 +++ b/drivers/acpi/acpi_bus.c Thu May 30 21:28:59 2002 @@ -1053,7 +1053,7 @@ if (!cid[0]) return -ENOENT; - if (0 != strstr(cid, device->pnp.hardware_id)) + if (0 != strstr(driver->ids, cid)) return 0; } @@ -2194,7 +2194,7 @@ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) -subsys_initcall(acpi_init); +arch_initcall(acpi_init); #endif __setup("acpi=", acpi_setup); diff -Nru a/drivers/acpi/acpi_osl.c b/drivers/acpi/acpi_osl.c --- a/drivers/acpi/acpi_osl.c Thu May 30 21:28:59 2002 +++ b/drivers/acpi/acpi_osl.c Thu May 30 21:28:59 2002 @@ -77,7 +77,7 @@ * Initialize PCI configuration space access, as we'll need to access * it while walking the namespace (bus 0 and root bridges w/ _BBNs). 
*/ -#if 0 +#if 1 pcibios_config_init(); if (!pci_config_read || !pci_config_write) { printk(KERN_ERR PREFIX "Access to PCI configuration space unavailable\n"); @@ -159,9 +159,9 @@ #else /*CONFIG_ACPI_EFI*/ addr->pointer_type = ACPI_PHYSICAL_POINTER; if (efi.acpi20) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) efi.acpi20; + addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi20); else if (efi.acpi) - addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) efi.acpi; + addr->pointer.physical = (ACPI_PHYSICAL_ADDRESS) virt_to_phys(efi.acpi); else { printk(KERN_ERR PREFIX "System description tables not found\n"); addr->pointer.physical = 0; @@ -175,6 +175,13 @@ acpi_status acpi_os_map_memory(ACPI_PHYSICAL_ADDRESS phys, ACPI_SIZE size, void **virt) { +#ifdef CONFIG_ACPI_EFI + if (!(EFI_MEMORY_WB & efi_mem_attributes(phys))) { + *virt = ioremap(phys, size); + } else { + *virt = phys_to_virt(phys); + } +#else if (phys > ULONG_MAX) { printk(KERN_ERR PREFIX "Cannot map memory that high\n"); return AE_BAD_PARAMETER; @@ -184,6 +191,7 @@ * ioremap already checks to ensure this is in reserved space */ *virt = ioremap((unsigned long) phys, size); +#endif if (!*virt) return AE_NO_MEMORY; @@ -325,26 +333,41 @@ void *value, u32 width) { - u32 dummy; - + u32 dummy; + int iomem = 0; + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_UC & efi_mem_attributes(phys_addr)) { + iomem = 1; + virt_addr = ioremap(phys_addr, width); + } else { + virt_addr = phys_to_virt(phys_addr); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif if (!value) value = &dummy; switch (width) { case 8: - *(u8*) value = *(u8*) phys_to_virt(phys_addr); + *(u8*) value = *(u8*) virt_addr; break; case 16: - *(u16*) value = *(u16*) phys_to_virt(phys_addr); + *(u16*) value = *(u16*) virt_addr; break; case 32: - *(u32*) value = *(u32*) phys_to_virt(phys_addr); + *(u32*) value = *(u32*) virt_addr; break; default: BUG(); } + if (iomem) + iounmap(virt_addr); + return AE_OK; } @@ -354,20 
+377,37 @@ acpi_integer value, u32 width) { + int iomem = 0; + void *virt_addr; + +#ifdef CONFIG_ACPI_EFI + if (EFI_MEMORY_UC & efi_mem_attributes(phys_addr)) { + iomem = 1; + virt_addr = ioremap(phys_addr,width); + } else { + virt_addr = phys_to_virt(phys_addr); + } +#else + virt_addr = phys_to_virt(phys_addr); +#endif + switch (width) { case 8: - *(u8*) phys_to_virt(phys_addr) = value; + *(u8*) virt_addr = value; break; case 16: - *(u16*) phys_to_virt(phys_addr) = value; + *(u16*) virt_addr = value; break; case 32: - *(u32*) phys_to_virt(phys_addr) = value; + *(u32*) virt_addr = value; break; default: BUG(); } + + if (iomem) + iounmap(virt_addr); return AE_OK; } diff -Nru a/drivers/acpi/acpi_pci_root.c b/drivers/acpi/acpi_pci_root.c --- a/drivers/acpi/acpi_pci_root.c Thu May 30 21:28:59 2002 +++ b/drivers/acpi/acpi_pci_root.c Thu May 30 21:28:59 2002 @@ -1,5 +1,5 @@ /* - * acpi_pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 30 $) + * acpi_pci_root.c - ACPI PCI Root Bridge Driver ($Revision: 31 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh @@ -21,6 +21,9 @@ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Copyright (C) 2002 J.I. Lee + * 02/04/18 J.I. Lee Allowed defered prt parsing on PCI root bridges + * that have no immediate prts. 
*/ #include @@ -48,7 +51,7 @@ static int acpi_pci_root_add (struct acpi_device *device); static int acpi_pci_root_remove (struct acpi_device *device, int type); -static int acpi_pci_root_bind (struct acpi_device *device); +static int acpi_pci_bind (struct acpi_device *device); static struct acpi_driver acpi_pci_root_driver = { name: ACPI_PCI_ROOT_DRIVER_NAME, @@ -57,12 +60,13 @@ ops: { add: acpi_pci_root_add, remove: acpi_pci_root_remove, - bind: acpi_pci_root_bind, + bind: acpi_pci_bind, }, }; struct acpi_pci_data { acpi_pci_id id; + struct pci_bus *bus; struct pci_dev *dev; }; @@ -296,7 +300,7 @@ buffer.pointer = pathname; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - printk(KERN_INFO PREFIX "%s [%s._PRT]\n", ACPI_PCI_PRT_DEVICE_NAME, + printk(KERN_INFO PREFIX "%s in [%s]\n", ACPI_PCI_PRT_DEVICE_NAME, pathname); /* @@ -307,8 +311,7 @@ buffer.pointer = NULL; status = acpi_get_irq_routing_table(handle, &buffer); if (status != AE_BUFFER_OVERFLOW) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error evaluating _PRT [%s]\n", + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", acpi_format_exception(status))); return_VALUE(-ENODEV); } @@ -321,8 +324,7 @@ status = acpi_get_irq_routing_table(handle, &buffer); if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error evaluating _PRT [%s]\n", + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error evaluating _PRT [%s]\n", acpi_format_exception(status))); kfree(buffer.pointer); return_VALUE(-ENODEV); @@ -407,16 +409,16 @@ static int -acpi_pci_root_bind ( +acpi_pci_bind ( struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; struct acpi_pci_data *data = NULL; - struct acpi_pci_data *parent_data = NULL; + struct acpi_pci_data *pdata = NULL; acpi_handle handle = NULL; - ACPI_FUNCTION_TRACE("acpi_pci_root_bind"); + ACPI_FUNCTION_TRACE("acpi_pci_bind"); if (!device || !device->parent) return_VALUE(-EINVAL); @@ -432,12 +434,11 @@ /* * Segment & Bus * ------------- - * These are obtained via the 
parent device's ACPI-PCI context.. - * Note that PCI root bridge devices don't have a 'dev->subordinate'. + * These are obtained via the parent device's ACPI-PCI context. */ status = acpi_get_data(device->parent->handle, acpi_pci_data_handler, - (void**) &parent_data); - if (ACPI_FAILURE(status) || !parent_data || !parent_data->dev) { + (void**) &pdata); + if (ACPI_FAILURE(status) || !pdata || !pdata->bus) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid ACPI-PCI context for parent device %s\n", acpi_device_bid(device->parent))); @@ -445,19 +446,9 @@ goto end; } - data->id.segment = parent_data->id.segment; + data->id.segment = pdata->id.segment; - if (parent_data->dev->subordinate) /* e.g. PCI-PCI bridge */ - data->id.bus = parent_data->dev->subordinate->number; - else if (parent_data->dev->bus) /* PCI root bridge */ - data->id.bus = parent_data->dev->bus->number; - else { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Parent device %s is not a PCI bridge\n", - acpi_device_bid(device->parent))); - result = -ENODEV; - goto end; - } + data->id.bus = pdata->bus->number; /* * Device & Function @@ -474,6 +465,10 @@ data->id.segment, data->id.bus, data->id.device, data->id.function)); + /* + * TBD: Support slot devices (e.g. function=0xFFFF). + */ + /* * Locate PCI Device * ----------------- @@ -501,6 +496,21 @@ } /* + * PCI Bridge? + * ----------- + * If so, set the 'bus' field and install the 'bind' function to + * facilitate callbacks for all of its children. + */ + if (data->dev->subordinate) { + data->bus = data->dev->subordinate; + device->ops.bind = acpi_pci_bind; + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Device %02x:%02x:%02x.%02x is a PCI bridge\n", + data->id.segment, data->id.bus, + data->id.device, data->id.function)); + } + + /* * Attach ACPI-PCI Context * ----------------------- * Thus binding the ACPI and PCI devices. @@ -515,15 +525,6 @@ } /* - * PCI Bridge? - * ----------- - * If so, install the 'bind' function to facilitate callbacks for - * all of its children. 
- */ - if (data->dev->subordinate) - device->ops.bind = acpi_pci_root_bind; - - /* * PCI Routing Table * ----------------- * Evaluate and parse _PRT, if exists. This code is independent of @@ -535,9 +536,9 @@ */ status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); if (ACPI_SUCCESS(status)) { - if (data->dev->subordinate) /* PCI-PCI bridge */ + if (data->bus) /* PCI-PCI bridge */ acpi_prt_parse(device->handle, data->id.segment, - data->dev->subordinate->number); + data->bus->number); else /* non-bridge PCI device */ acpi_prt_parse(device->handle, data->id.segment, data->id.bus); @@ -563,6 +564,7 @@ struct acpi_pci_root *root = NULL; acpi_status status = AE_OK; unsigned long value = 0; + acpi_handle handle = NULL; ACPI_FUNCTION_TRACE("acpi_pci_root_add"); @@ -582,7 +584,7 @@ /* * TBD: Doesn't the bus driver automatically set this? */ - device->ops.bind = acpi_pci_root_bind; + device->ops.bind = acpi_pci_bind; /* * Segment @@ -596,7 +598,8 @@ root->data.id.segment = (u16) value; break; case AE_NOT_FOUND: - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming segment 0 (no _SEG)\n")); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Assuming segment 0 (no _SEG)\n")); root->data.id.segment = 0; break; default: @@ -639,10 +642,9 @@ * TBD: Need PCI interface for enumeration/configuration of roots. */ - printk(KERN_INFO PREFIX "%s [%s] (%02x:%02x:%02x.%02x)\n", + printk(KERN_INFO PREFIX "%s [%s] (%02x:%02x)\n", acpi_device_name(device), acpi_device_bid(device), - root->data.id.segment, root->data.id.bus, - root->data.id.device, root->data.id.function); + root->data.id.segment, root->data.id.bus); /* * Scan the Root Bridge @@ -651,20 +653,11 @@ * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ - pci_scan_bus(root->data.id.bus, pci_root_ops, NULL); - - /* - * Locate PCI Device - * ----------------- - * Locate the matching PCI root bridge device in the PCI namespace. 
- */ - root->data.dev = pci_find_slot(root->data.id.bus, - PCI_DEVFN(root->data.id.device, root->data.id.function)); - if (!root->data.dev) { + root->data.bus = pcibios_scan_root(root->data.id.segment, root->data.id.bus); + if (!root->data.bus) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Device %02x:%02x:%02x.%02x not present\n", - root->data.id.segment, root->data.id.bus, - root->data.id.device, root->data.id.function)); + "Bus %02x:%02x not present in PCI namespace\n", + root->data.id.segment, root->data.id.bus)); result = -ENODEV; goto end; } @@ -672,7 +665,8 @@ /* * Attach ACPI-PCI Context * ----------------------- - * Thus binding the ACPI and PCI devices. + * Thus binding the ACPI and PCI devices. Note that PCI root bridges + * never set a 'data.dev' member (rely on 'data.bus' instead). */ status = acpi_attach_data(root->handle, acpi_pci_data_handler, &root->data); @@ -687,9 +681,27 @@ /* * PCI Routing Table * ----------------- - * Evaluate and parse _PRT, if exists. Note that root bridges - * must have a _PRT (optional for subordinate bridges). + * Evaluate and parse _PRT, if exists. Note that root bridges MUST + * have a _PRT (optional for PCI-PCI bridges). + * + */ + + /* + * J.I. + * Some PCI Root Brides can have no immediate _PRTs, + * in that case, _PRTs are buried in child devices. + * So, let's pass even if Root PCI bridge has no immediate _PRT + * and defer the _PRT parsing until we get somewhere down there. 
*/ + status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle); + if (ACPI_FAILURE(status)) { + ACPI_DEBUG_PRINT((ACPI_DB_WARN, + "Deferred _PRT parsing for PCI Root bridge(s:b=0x%x:%x)...\n", + root->data.id.segment, root->data.id.bus)); + //result = 0; + goto end; + } + result = acpi_prt_parse(device->handle, root->data.id.segment, root->data.id.bus); diff -Nru a/drivers/acpi/acpi_system.c b/drivers/acpi/acpi_system.c --- a/drivers/acpi/acpi_system.c Thu May 30 21:28:59 2002 +++ b/drivers/acpi/acpi_system.c Thu May 30 21:28:59 2002 @@ -185,7 +185,7 @@ #endif /* flush caches */ - wbinvd(); + ACPI_FLUSH_CPU_CACHE(); /* Do arch specific saving of state. */ if (state > ACPI_STATE_S1) { @@ -311,7 +311,7 @@ /* disable interrupts and flush caches */ ACPI_DISABLE_IRQS(); - wbinvd(); + ACPI_FLUSH_CPU_CACHE(); /* perform OS-specific sleep actions */ status = acpi_system_suspend(state); diff -Nru a/drivers/acpi/include/platform/aclinux.h b/drivers/acpi/include/platform/aclinux.h --- a/drivers/acpi/include/platform/aclinux.h Thu May 30 21:28:59 2002 +++ b/drivers/acpi/include/platform/aclinux.h Thu May 30 21:28:59 2002 @@ -42,7 +42,7 @@ #define strtoul simple_strtoul -#ifdef _IA64 +#ifdef CONFIG_IA64 #define ACPI_FLUSH_CPU_CACHE() #else #define ACPI_FLUSH_CPU_CACHE() wbinvd() diff -Nru a/drivers/char/Config.help b/drivers/char/Config.help --- a/drivers/char/Config.help Thu May 30 21:28:59 2002 +++ b/drivers/char/Config.help Thu May 30 21:28:59 2002 @@ -127,6 +127,10 @@ 815 and 830m chipset boards for their on-board integrated graphics. This is required to do any useful video modes with these boards. +CONFIG_AGP_I460 + This option gives you AGP GART support for the Intel 460GX chipset + for IA64 processors. + CONFIG_AGP_VIA This option gives you AGP support for the GLX component of the XFree86 4.x on VIA MPV3/Apollo Pro chipsets. @@ -169,6 +173,10 @@ You should say Y here if you use XFree86 3.3.6 or 4.x and want to use GLX or DRI. If unsure, say N. 
+ +CONFIG_AGP_HP_ZX1 + This option gives you AGP GART support for the HP ZX1 chipset + for IA64 processors. CONFIG_I810_TCO Hardware driver for the TCO timer built into the Intel i810 and i815 diff -Nru a/drivers/char/Config.in b/drivers/char/Config.in --- a/drivers/char/Config.in Thu May 30 21:28:58 2002 +++ b/drivers/char/Config.in Thu May 30 21:28:58 2002 @@ -217,6 +217,7 @@ bool ' ALI chipset support' CONFIG_AGP_ALI bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS if [ "$CONFIG_IA64" = "y" ]; then + bool ' Intel 460GX support' CONFIG_AGP_I460 bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 fi fi diff -Nru a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h --- a/drivers/char/agp/agp.h Thu May 30 21:28:59 2002 +++ b/drivers/char/agp/agp.h Thu May 30 21:28:59 2002 @@ -84,8 +84,8 @@ void *dev_private_data; struct pci_dev *dev; gatt_mask *masks; - unsigned long *gatt_table; - unsigned long *gatt_table_real; + u32 *gatt_table; + u32 *gatt_table_real; unsigned long scratch_page; unsigned long gart_bus_addr; unsigned long gatt_bus_addr; @@ -99,7 +99,6 @@ int needs_scratch_page; int aperture_size_idx; int num_aperture_sizes; - int num_of_masks; int capndx; int cant_use_aperture; @@ -111,6 +110,7 @@ void (*cleanup) (void); void (*tlb_flush) (agp_memory *); unsigned long (*mask_memory) (unsigned long, int); + unsigned long (*unmask_memory) (unsigned long); void (*cache_flush) (void); int (*create_gatt_table) (void); int (*free_gatt_table) (void); @@ -223,6 +223,9 @@ #ifndef PCI_DEVICE_ID_INTEL_82443GX_1 #define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 #endif +#ifndef PCI_DEVICE_ID_INTEL_460GX +#define PCI_DEVICE_ID_INTEL_460GX 0x84ea +#endif #ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 #define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 #endif @@ -267,6 +270,15 @@ #define INTEL_AGPCTRL 0xb0 #define INTEL_NBXCFG 0x50 #define INTEL_ERRSTS 0x91 + +/* Intel 460GX Registers */ +#define INTEL_I460_APBASE 0x10 +#define INTEL_I460_BAPBASE 0x98 +#define INTEL_I460_GXBCTL 0xa0 +#define 
INTEL_I460_AGPSIZ 0xa2 +#define INTEL_I460_ATTBASE 0xfe200000 +#define INTEL_I460_GATT_VALID (1UL << 24) +#define INTEL_I460_GATT_COHERENT (1UL << 25) /* intel i830 registers */ #define I830_GMCH_CTRL 0x52 diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Thu May 30 21:28:58 2002 +++ b/drivers/char/agp/agpgart_be.c Thu May 30 21:28:58 2002 @@ -22,6 +22,7 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * + * 460GX support by Chris Ahna */ #include #include @@ -43,6 +44,9 @@ #include #include #include +#include +#include +#include #include #include "agp.h" @@ -61,31 +65,42 @@ static void flush_cache(void); +/* Declare these with attribute unused so the compiler doesn't complain if the + routines are not used. It would be even better if they weren't compiled + into the kernel at all. */ +static void agp_generic_agp_enable (u32) __attribute__((unused)); +static int agp_generic_create_gatt_table (void) __attribute__((unused)); +static int agp_generic_suspend (void) __attribute__((unused)); +static void agp_generic_resume (void) __attribute__((unused)); +static int agp_generic_free_gatt_table (void) __attribute__((unused)); +static int agp_generic_insert_memory (agp_memory *, off_t, int) __attribute__((unused)); +static int agp_generic_remove_memory (agp_memory *, off_t, int) __attribute__((unused)); +static agp_memory *agp_generic_alloc_by_type (size_t, int) __attribute__((unused)); +static void agp_generic_free_by_type (agp_memory *) __attribute__((unused)); +static unsigned long agp_generic_alloc_page (void) __attribute__((unused)); +static void agp_generic_destroy_page (unsigned long) __attribute__((unused)); +static unsigned long agp_generic_unmask_memory (unsigned long) __attribute__((unused)); + + static struct agp_bridge_data agp_bridge; static int agp_try_unsupported __initdata = 0; +#if defined(__alpha__) || defined(__ia64__) || 
defined(__sparc__) static inline void flush_cache(void) { -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__) - /* ??? I wonder if we'll really need to flush caches, or if the - core logic can manage to keep the system coherent. The ARM - speaks only of using `cflush' to get things in memory in - preparation for power failure. + mb(); +} - If we do need to call `cflush', we'll need a target page, - as we can only flush one page at a time. +#define smp_flush_cache flush_cache - Ditto for IA-64. --davidm 00/08/07 */ - mb(); -#else -#error "Please define flush_cache." -#endif +#elif defined(__i386__) || defined(__x86_64__) + +static inline void flush_cache(void) +{ + asm volatile ("wbinvd":::"memory"); } -#ifdef CONFIG_SMP static atomic_t cpus_waiting; static void ipi_handler(void *null) @@ -105,10 +120,15 @@ while (atomic_read(&cpus_waiting) > 0) barrier(); } -#define global_cache_flush smp_flush_cache -#else /* CONFIG_SMP */ -#define global_cache_flush flush_cache -#endif /* CONFIG_SMP */ +#else +# error "Please define flush_cache." 
+#endif + +#ifdef CONFIG_SMP +# define global_cache_flush smp_flush_cache +#else +# define global_cache_flush flush_cache +#endif int agp_backend_acquire(void) { @@ -205,13 +225,17 @@ agp_bridge.free_by_type(curr); return; } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - curr->memory[i] &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(curr->memory[i])); + if (!agp_bridge.cant_use_aperture) { + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + agp_bridge.agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); + } } + } else { + vfree(curr->vmptr); } + agp_free_key(curr->key); vfree(curr->memory); kfree(curr); @@ -247,26 +271,48 @@ scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; new = agp_create_memory(scratch_pages); - if (new == NULL) { MOD_DEC_USE_COUNT; return NULL; } - for (i = 0; i < page_count; i++) { - new->memory[i] = agp_bridge.agp_alloc_page(); - if (new->memory[i] == 0) { - /* Free this structure */ - agp_free_memory(new); + if (!agp_bridge.cant_use_aperture) { + for (i = 0; i < page_count; i++) { + new->memory[i] = agp_bridge.agp_alloc_page(); + + if (new->memory[i] == 0) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = virt_to_phys((void *) new->memory[i]); + new->page_count++; + } + } else { + void *vmblock, *vaddr; + unsigned long paddr; + struct page *page; + + vmblock = __vmalloc(page_count << PAGE_SHIFT, GFP_KERNEL, PAGE_KERNEL); + if (vmblock == NULL) { + MOD_DEC_USE_COUNT; return NULL; } - new->memory[i] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[i]), - type); - new->page_count++; - } + new->vmptr = vmblock; + vaddr = vmblock; + + for (i = 0; i < page_count; i++, vaddr += PAGE_SIZE) { + page = vmalloc_to_page(vaddr); + if (!page) { + MOD_DEC_USE_COUNT; + return NULL; + } + paddr = virt_to_phys(page_address(page)); + new->memory[i] = paddr; + } + new->page_count = 
page_count; + } return new; } @@ -307,9 +353,6 @@ void agp_copy_info(agp_kern_info * info) { - unsigned long page_mask = 0; - int i; - memset(info, 0, sizeof(agp_kern_info)); if (agp_bridge.type == NOT_SUPPORTED) { info->chipset = agp_bridge.type; @@ -325,11 +368,7 @@ info->max_memory = agp_bridge.max_memory_agp; info->current_memory = atomic_read(&agp_bridge.current_memory_agp); info->cant_use_aperture = agp_bridge.cant_use_aperture; - - for(i = 0; i < agp_bridge.num_of_masks; i++) - page_mask |= agp_bridge.mask_memory(page_mask, i); - - info->page_mask = ~page_mask; + info->page_mask = ~0UL; } /* End - Routine to copy over information structure */ @@ -581,7 +620,7 @@ for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (unsigned long *) table; + agp_bridge.gatt_table_real = (u32 *) table; CACHE_FLUSH(); agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), (PAGE_SIZE * (1 << page_order))); @@ -713,7 +752,8 @@ mem->is_flushed = TRUE; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge.gatt_table[j] = mem->memory[i]; + agp_bridge.gatt_table[j] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); @@ -798,6 +838,11 @@ agp_bridge.agp_enable(mode); } +static unsigned long agp_generic_unmask_memory(unsigned long addr) +{ + return addr & ~0x00000fffUL; +} + /* End - Generic Agp routines */ #ifdef CONFIG_AGP_I810 @@ -937,7 +982,7 @@ agp_bridge.tlb_flush(mem); return 0; } - if((type == AGP_PHYS_MEMORY) && + if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY)) { goto insert; } @@ -948,7 +993,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (j * 4), mem->memory[i]); + I810_PTE_BASE + (j * 4), + agp_bridge.mask_memory(mem->memory[i], mem->type)); } CACHE_FLUSH(); @@ -1014,10 +1060,7 @@ agp_free_memory(new); return NULL; } - new->memory[0] = - 
agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[0]), - type); + new->memory[0] = virt_to_phys((void *) new->memory[0]); new->page_count = 1; new->num_scratch_pages = 1; new->type = AGP_PHYS_MEMORY; @@ -1051,7 +1094,6 @@ intel_i810_private.i810_dev = i810_dev; agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 2; agp_bridge.aperture_sizes = (void *) intel_i810_sizes; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1062,6 +1104,7 @@ agp_bridge.cleanup = intel_i810_cleanup; agp_bridge.tlb_flush = intel_i810_tlbflush; agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = intel_i810_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1253,7 +1296,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]); + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4), + agp_bridge.mask_memory(mem->memory[i], mem->type)); CACHE_FLUSH(); @@ -1314,7 +1358,7 @@ return(NULL); } - nw->memory[0] = agp_bridge.mask_memory(virt_to_phys((void *) nw->memory[0]),type); + nw->memory[0] = virt_to_phys((void *) nw->memory[0]); nw->page_count = 1; nw->num_scratch_pages = 1; nw->type = AGP_PHYS_MEMORY; @@ -1330,7 +1374,6 @@ intel_i830_private.i830_dev = i830_dev; agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 3; agp_bridge.aperture_sizes = (void *) intel_i830_sizes; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1365,6 +1408,570 @@ #endif /* CONFIG_AGP_I810 */ +#ifdef CONFIG_AGP_I460 + +/* BIOS configures the chipset so that one of two apbase registers are used */ +static u8 intel_i460_dynamic_apbase = 0x10; + +/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ +static u8 intel_i460_pageshift = 12; +static u32 
intel_i460_pagesize; + +/* Keep track of which is larger, chipset or kernel page size. */ +static u32 intel_i460_cpk = 1; + +/* Structure for tracking partial use of 4MB GART pages */ +static u32 **i460_pg_detail = NULL; +static u32 *i460_pg_count = NULL; + +#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) +#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) + +#define I460_SRAM_IO_DISABLE (1 << 4) +#define I460_BAPBASE_ENABLE (1 << 3) +#define I460_AGPSIZ_MASK 0x7 +#define I460_4M_PS (1 << 1) + +#define log2(x) ffz(~(x)) + +static inline void intel_i460_read_back (volatile u32 *entry) +{ + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + *entry; +} + +static int intel_i460_fetch_size(void) +{ + int i; + u8 temp; + aper_size_info_8 *values; + + /* Determine the GART page size */ + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); + intel_i460_pageshift = (temp & I460_4M_PS) ? 
22 : 12; + intel_i460_pagesize = 1UL << intel_i460_pageshift; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + + /* Exit now if the IO drivers for the GART SRAMS are turned off */ + if (temp & I460_SRAM_IO_DISABLE) { + printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); + printk(KERN_ERR PFX "AGPGART operation not possible\n"); + return 0; + } + + /* Make sure we don't try to create an 2 ^ 23 entry GATT */ + if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); + return 0; + } + + /* Determine the proper APBASE register */ + if (temp & I460_BAPBASE_ENABLE) + intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; + else + intel_i460_dynamic_apbase = INTEL_I460_APBASE; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* + * Dynamically calculate the proper num_entries and page_order values for + * the define aperture sizes. Take care not to shift off the end of + * values[i].size. + */ + values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12); + values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. 
*/ +static void intel_i460_tlb_flush(agp_memory * mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void intel_i460_write_agpsiz(u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void intel_i460_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + intel_i460_write_agpsiz(previous_size->size_value); + + if (intel_i460_cpk == 0) { + vfree(i460_pg_detail); + vfree(i460_pg_count); + } +} + + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +static int intel_i460_configure(void) +{ + union { + u32 small[2]; + u64 large; + } temp; + u8 scratch; + int i; + + aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge.current_size); + intel_i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems. + */ + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than + * a kernel page. 
+ */ + if (I460_CPAGES_PER_KPAGE >= 1) { + intel_i460_cpk = 1; + } else { + intel_i460_cpk = 0; + + i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries); + i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries); + + for (i = 0; i < current_size->num_entries; i++) { + i460_pg_count[i] = 0; + i460_pg_detail[i] = NULL; + } + } + return 0; +} + +static int intel_i460_create_gatt_table(void) +{ + char *table; + int i; + int page_order; + int num_entries; + void *temp; + + /* + * Load up the fixed address of the GART SRAMS which hold our + * GATT table. + */ + table = (char *) __va(INTEL_I460_ATTBASE); + + temp = agp_bridge.current_size; + page_order = A_SIZE_8(temp)->page_order; + num_entries = A_SIZE_8(temp)->num_entries; + + agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +static int intel_i460_free_gatt_table(void) +{ + int num_entries; + int i; + void *temp; + + temp = agp_bridge.current_size; + + num_entries = A_SIZE_8(temp)->num_entries; + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + + iounmap(agp_bridge.gatt_table); + return 0; +} + +/* These functions are called when PAGE_SIZE exceeds the GART page size */ + +static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i, j, k, num_entries; + void *temp; + unsigned long paddr; + + /* + * The rest of the kernel will compute page offsets in terms of + * PAGE_SIZE. 
+ */ + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... */ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (i = 0, j = pg_start; i < mem->page_count; i++) { + paddr = mem->memory[i]; + for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) + agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type); + } + + intel_i460_read_back(agp_bridge.gatt_table + j - 1); + return 0; +} + +static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i; + + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++) + agp_bridge.gatt_table[i] = 0; + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +/* + * These functions are called when the GART page size exceeds PAGE_SIZE. + * + * This situation is interesting since AGP memory allocations that are + * smaller than a single GART page are possible. The structures i460_pg_count + * and i460_pg_detail track partial allocation of the large GART pages to + * work around this issue. + * + * i460_pg_count[pg_num] tracks the number of kernel pages in use within + * GART page pg_num. i460_pg_detail[pg_num] is an array containing a + * psuedo-GART entry for each of the aforementioned kernel pages. The whole + * of i460_pg_detail is equivalent to a giant GATT with page size equal to + * that of the kernel. 
+ */ + +static void *intel_i460_alloc_large_page(int pg_num) +{ + int i; + void *bp, *bp_end; + struct page *page; + + i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE); + if (i460_pg_detail[pg_num] == NULL) { + printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); + return NULL; + } + + for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) + i460_pg_detail[pg_num][i] = 0; + + bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT); + if (bp == NULL) { + printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); + return NULL; + } + + bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); + + for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { + atomic_inc(&agp_bridge.current_memory_agp); + } + return bp; +} + +static void intel_i460_free_large_page(int pg_num, unsigned long addr) +{ + struct page *page; + void *bp, *bp_end; + + bp = (void *) __va(addr); + bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))); + + vfree(i460_pg_detail[pg_num]); + i460_pg_detail[pg_num] = NULL; + + for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { + atomic_dec(&agp_bridge.current_memory_agp); + } + + free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); +} + +static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + if (end_pg > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return 
-EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (pg = start_pg; pg <= end_pg; pg++) { + /* Allocate new GART pages if necessary */ + if (i460_pg_detail[pg] == NULL) { + temp = intel_i460_alloc_large_page(pg); + if (temp == NULL) + return -ENOMEM; + agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp, + 0); + intel_i460_read_back(agp_bridge.gatt_table + pg); + } + + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++) + { + if (i460_pg_detail[pg][idx] != 0) + return -EBUSY; + } + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... */ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (pg = start_pg, i = 0; pg <= end_pg; pg++) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = paddr + (idx * PAGE_SIZE); + i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i], + mem->type); + i460_pg_count[pg]++; + } + } + + return 0; +} + +static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + for (i = 0, pg = start_pg; pg <= end_pg; pg++) { + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? 
(end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = 0; + i460_pg_detail[pg][idx] = 0; + i460_pg_count[pg]--; + } + + /* Free GART pages if they are unused */ + if (i460_pg_count[pg] == 0) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; + intel_i460_read_back(agp_bridge.gatt_table + pg); + intel_i460_free_large_page(pg, paddr); + } + } + return 0; +} + +/* Dummy routines to call the approriate {cpk,kpc} function */ + +static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_insert_memory_cpk(mem, pg_start, type); + else + return intel_i460_insert_memory_kpc(mem, pg_start, type); +} + +static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_remove_memory_cpk(mem, pg_start, type); + else + return intel_i460_remove_memory_kpc(mem, pg_start, type); +} + +/* + * If the kernel page size is smaller that the chipset page size, we don't + * want to allocate memory until we know where it is to be bound in the + * aperture (a multi-kernel-page alloc might fit inside of an already + * allocated GART page). Consequently, don't allocate or free anything + * if i460_cpk (meaning chipset pages per kernel page) isn't set. + * + * Let's just hope nobody counts on the allocated AGP memory being there + * before bind time (I don't think current drivers do)... 
+ */ +static unsigned long intel_i460_alloc_page(void) +{ + if (intel_i460_cpk) + return agp_generic_alloc_page(); + + /* Returning NULL would cause problems */ + return ~0UL; +} + +static void intel_i460_destroy_page(unsigned long page) +{ + if (intel_i460_cpk) + agp_generic_destroy_page(page); +} + +static gatt_mask intel_i460_masks[] = +{ + { + INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, + 0 + } +}; + +static unsigned long intel_i460_mask_memory(unsigned long addr, int type) +{ + /* Make sure the returned address is a valid GATT entry */ + return (agp_bridge.masks[0].mask + | (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); +} + +static unsigned long intel_i460_unmask_memory(unsigned long addr) +{ + /* Turn a GATT entry into a physical address */ + return ((addr & 0xffffff) << 12); +} + +static aper_size_info_8 intel_i460_sizes[3] = +{ + /* + * The 32GB aperture is only available with a 4M GART page size. + * Due to the dynamic GART page size, we can't figure out page_order + * or num_entries until runtime. 
+ */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +static int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) +{ + agp_bridge.masks = intel_i460_masks; + agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 3; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_i460_configure; + agp_bridge.fetch_size = intel_i460_fetch_size; + agp_bridge.cleanup = intel_i460_cleanup; + agp_bridge.tlb_flush = intel_i460_tlb_flush; + agp_bridge.mask_memory = intel_i460_mask_memory; + agp_bridge.unmask_memory = intel_i460_unmask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = intel_i460_create_gatt_table; + agp_bridge.free_gatt_table = intel_i460_free_gatt_table; + agp_bridge.insert_memory = intel_i460_insert_memory; + agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = intel_i460_alloc_page; + agp_bridge.agp_destroy_page = intel_i460_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + return 0; +} + +#endif /* CONFIG_AGP_I460 */ + #ifdef CONFIG_AGP_INTEL static int intel_fetch_size(void) @@ -1746,10 +2353,9 @@ {32, 8192, 3, 56} }; -static int __init intel_generic_setup (struct pci_dev *pdev) +static int __init intel_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_generic_sizes; agp_bridge.size_type = U16_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1760,6 +2366,7 @@ agp_bridge.cleanup = intel_cleanup; agp_bridge.tlb_flush = intel_tlbflush; agp_bridge.mask_memory = 
intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1775,15 +2382,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } - -static int __init intel_820_setup (struct pci_dev *pdev) +static int __init intel_820_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1809,14 +2412,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_830mp_setup (struct pci_dev *pdev) +static int __init intel_830mp_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 4; @@ -1842,14 +2442,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_840_setup (struct pci_dev *pdev) +static int __init intel_840_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1875,14 +2472,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_845_setup (struct pci_dev *pdev) +static int __init intel_845_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ 
-1893,6 +2487,7 @@ agp_bridge.cleanup = intel_8xx_cleanup; agp_bridge.tlb_flush = intel_8xx_tlbflush; agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1908,14 +2503,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_850_setup (struct pci_dev *pdev) +static int __init intel_850_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1926,6 +2518,7 @@ agp_bridge.cleanup = intel_8xx_cleanup; agp_bridge.tlb_flush = intel_8xx_tlbflush; agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -1941,14 +2534,11 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } -static int __init intel_860_setup (struct pci_dev *pdev) +static int __init intel_860_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1974,8 +2564,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_INTEL */ @@ -2065,10 +2653,9 @@ {0x00000000, 0} }; -static int __init via_generic_setup (struct pci_dev *pdev) +static int __init via_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = via_generic_masks; - agp_bridge.num_of_masks = 1; 
agp_bridge.aperture_sizes = (void *) via_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2079,6 +2666,7 @@ agp_bridge.cleanup = via_cleanup; agp_bridge.tlb_flush = via_tlbflush; agp_bridge.mask_memory = via_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2094,8 +2682,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_VIA */ @@ -2182,7 +2768,6 @@ static int __init sis_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = sis_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) sis_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2193,6 +2778,7 @@ agp_bridge.cleanup = sis_cleanup; agp_bridge.tlb_flush = sis_tlbflush; agp_bridge.mask_memory = sis_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2215,8 +2801,8 @@ #ifdef CONFIG_AGP_AMD typedef struct _amd_page_map { - unsigned long *real; - unsigned long *remapped; + u32 *real; + u32 *remapped; } amd_page_map; static struct _amd_irongate_private { @@ -2229,14 +2815,13 @@ { int i; - page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); + page_map->real = (u32 *) __get_free_page(GFP_KERNEL); if (page_map->real == NULL) { return -ENOMEM; } SetPageReserved(virt_to_page(page_map->real)); CACHE_FLUSH(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), PAGE_SIZE); if (page_map->remapped == NULL) { ClearPageReserved(virt_to_page(page_map->real)); free_page((unsigned long) page_map->real); @@ 
-2482,7 +3067,7 @@ off_t pg_start, int type) { int i, j, num_entries; - unsigned long *cur_gatt; + u32 *cur_gatt; unsigned long addr; num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; @@ -2512,17 +3097,17 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; } -static int amd_remove_memory(agp_memory * mem, off_t pg_start, - int type) +static int amd_remove_memory(agp_memory * mem, off_t pg_start, int type) { int i; - unsigned long *cur_gatt; + u32 *cur_gatt; unsigned long addr; if (type != 0 || mem->type != 0) { @@ -2555,10 +3140,9 @@ {0x00000001, 0} }; -static int __init amd_irongate_setup (struct pci_dev *pdev) +static int __init amd_irongate_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = amd_irongate_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2569,6 +3153,7 @@ agp_bridge.cleanup = amd_irongate_cleanup; agp_bridge.tlb_flush = amd_irongate_tlbflush; agp_bridge.mask_memory = amd_irongate_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = amd_create_gatt_table; @@ -2584,8 +3169,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_AMD */ @@ -2802,10 +3385,9 @@ {4, 1024, 0, 3} }; -static int __init ali_generic_setup (struct pci_dev *pdev) +static int __init ali_generic_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = ali_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) ali_generic_sizes; 
agp_bridge.size_type = U32_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2816,6 +3398,7 @@ agp_bridge.cleanup = ali_cleanup; agp_bridge.tlb_flush = ali_tlbflush; agp_bridge.mask_memory = ali_mask_memory; + agp_bridge.unmask_memory = agp_generic_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = ali_cache_flush; agp_bridge.create_gatt_table = agp_generic_create_gatt_table; @@ -2831,8 +3414,6 @@ agp_bridge.cant_use_aperture = 0; return 0; - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_ALI */ @@ -3211,14 +3792,14 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; } -static int serverworks_remove_memory(agp_memory * mem, off_t pg_start, - int type) +static int serverworks_remove_memory(agp_memory * mem, off_t pg_start, int type) { int i; unsigned long *cur_gatt; @@ -3273,368 +3854,6 @@ * AGP devices and collect their data. 
*/ -#ifdef CONFIG_AGP_HP_ZX1 - -#ifndef log2 -#define log2(x) ffz(~(x)) -#endif - -#define HP_ZX1_IOVA_BASE GB(1UL) -#define HP_ZX1_IOVA_SIZE GB(1UL) -#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) -#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL - -#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL -#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \ - hp_private.io_tlb_shift) - -static aper_size_info_fixed hp_zx1_sizes[] = -{ - {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ -}; - -static gatt_mask hp_zx1_masks[] = -{ - {HP_ZX1_PDIR_VALID_BIT, 0} -}; - -static struct _hp_private { - struct pci_dev *ioc; - volatile u8 *registers; - u64 *io_pdir; // PDIR for entire IOVA - u64 *gatt; // PDIR just for GART (subset of above) - u64 gatt_entries; - u64 iova_base; - u64 gart_base; - u64 gart_size; - u64 io_pdir_size; - int io_pdir_owner; // do we own it, or share it with sba_iommu? - int io_page_size; - int io_tlb_shift; - int io_tlb_ps; // IOC ps config - int io_pages_per_kpage; -} hp_private; - -static int __init hp_zx1_ioc_shared(void) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); - - /* - * IOC already configured by sba_iommu module; just use - * its setup. 
We assume: - * - IOVA space is 1Gb in size - * - first 512Mb is IOMMU, second 512Mb is GART - */ - hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG); - switch (hp->io_tlb_ps) { - case 0: hp->io_tlb_shift = 12; break; - case 1: hp->io_tlb_shift = 13; break; - case 2: hp->io_tlb_shift = 14; break; - case 3: hp->io_tlb_shift = 16; break; - default: - printk(KERN_ERR PFX "Invalid IOTLB page size " - "configuration 0x%x\n", hp->io_tlb_ps); - hp->gatt = 0; - hp->gatt_entries = 0; - return -ENODEV; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; - - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gatt_entries = hp->gart_size / hp->io_page_size; - - hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE)); - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - - if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { - hp->gatt = 0; - hp->gatt_entries = 0; - printk(KERN_ERR PFX "No reserved IO PDIR entry found; " - "GART disabled\n"); - return -ENODEV; - } - - return 0; -} - -static int __init hp_zx1_ioc_owner(u8 ioc_rev) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); - - /* - * Select an IOV page size no larger than system page size. 
- */ - if (PAGE_SIZE >= KB(64)) { - hp->io_tlb_shift = 16; - hp->io_tlb_ps = 3; - } else if (PAGE_SIZE >= KB(16)) { - hp->io_tlb_shift = 14; - hp->io_tlb_ps = 2; - } else if (PAGE_SIZE >= KB(8)) { - hp->io_tlb_shift = 13; - hp->io_tlb_ps = 1; - } else { - hp->io_tlb_shift = 12; - hp->io_tlb_ps = 0; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = HP_ZX1_IOVA_BASE; - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; - - hp->gatt_entries = hp->gart_size / hp->io_page_size; - hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); - - return 0; -} - -static int __init hp_zx1_ioc_init(void) -{ - struct _hp_private *hp = &hp_private; - struct pci_dev *ioc; - int i; - u8 ioc_rev; - - ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); - if (!ioc) { - printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n"); - return -ENODEV; - } - hp->ioc = ioc; - - pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { - hp->registers = (u8 *) ioremap(pci_resource_start(ioc, - i), - pci_resource_len(ioc, i)); - break; - } - } - if (!hp->registers) { - printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n"); - - return -ENODEV; - } - - /* - * If the IOTLB is currently disabled, we can take it over. - * Otherwise, we have to share with sba_iommu. 
- */ - hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; - - if (hp->io_pdir_owner) - return hp_zx1_ioc_owner(ioc_rev); - - return hp_zx1_ioc_shared(); -} - -static int hp_zx1_fetch_size(void) -{ - int size; - - size = hp_private.gart_size / MB(1); - hp_zx1_sizes[0].size = size; - agp_bridge.current_size = (void *) &hp_zx1_sizes[0]; - return size; -} - -static int hp_zx1_configure(void) -{ - struct _hp_private *hp = &hp_private; - - agp_bridge.gart_bus_addr = hp->gart_base; - agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP); - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); - - if (hp->io_pdir_owner) { - OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, - virt_to_phys(hp->io_pdir)); - OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps); - OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); - OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1); - OUTREG64(hp->registers, HP_ZX1_PCOM, - hp->iova_base | log2(HP_ZX1_IOVA_SIZE)); - INREG64(hp->registers, HP_ZX1_PCOM); - } - - return 0; -} - -static void hp_zx1_cleanup(void) -{ - struct _hp_private *hp = &hp_private; - - if (hp->io_pdir_owner) - OUTREG64(hp->registers, HP_ZX1_IBASE, 0); - iounmap((void *) hp->registers); -} - -static void hp_zx1_tlbflush(agp_memory * mem) -{ - struct _hp_private *hp = &hp_private; - - OUTREG64(hp->registers, HP_ZX1_PCOM, - hp->gart_base | log2(hp->gart_size)); - INREG64(hp->registers, HP_ZX1_PCOM); -} - -static int hp_zx1_create_gatt_table(void) -{ - struct _hp_private *hp = &hp_private; - int i; - - if (hp->io_pdir_owner) { - hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, - get_order(hp->io_pdir_size)); - if (!hp->io_pdir) { - printk(KERN_ERR PFX "Couldn't allocate contiguous " - "memory for I/O PDIR\n"); - hp->gatt = 0; - hp->gatt_entries = 0; - return -ENOMEM; - } - memset(hp->io_pdir, 0, hp->io_pdir_size); - - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - } - - 
for (i = 0; i < hp->gatt_entries; i++) { - hp->gatt[i] = (unsigned long) agp_bridge.scratch_page; - } - - return 0; -} - -static int hp_zx1_free_gatt_table(void) -{ - struct _hp_private *hp = &hp_private; - - if (hp->io_pdir_owner) - free_pages((unsigned long) hp->io_pdir, - get_order(hp->io_pdir_size)); - else - hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; - return 0; -} - -static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, k; - off_t j, io_pg_start; - int io_pg_count; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - if ((io_pg_start + io_pg_count) > hp->gatt_entries) { - return -EINVAL; - } - - j = io_pg_start; - while (j < (io_pg_start + io_pg_count)) { - if (hp->gatt[j]) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - - for (i = 0, j = io_pg_start; i < mem->page_count; i++) { - unsigned long paddr; - - paddr = mem->memory[i]; - for (k = 0; - k < hp->io_pages_per_kpage; - k++, j++, paddr += hp->io_page_size) { - hp->gatt[j] = agp_bridge.mask_memory(paddr, type); - } - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, io_pg_start, io_pg_count; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { - hp->gatt[i] = agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static unsigned long hp_zx1_mask_memory(unsigned long addr, int type) -{ - return HP_ZX1_PDIR_VALID_BIT | addr; -} - -static unsigned long hp_zx1_unmask_memory(unsigned long addr) -{ - return addr & 
~(HP_ZX1_PDIR_VALID_BIT); -} - -static int __init hp_zx1_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = hp_zx1_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.dev_private_data = NULL; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = hp_zx1_configure; - agp_bridge.fetch_size = hp_zx1_fetch_size; - agp_bridge.cleanup = hp_zx1_cleanup; - agp_bridge.tlb_flush = hp_zx1_tlbflush; - agp_bridge.mask_memory = hp_zx1_mask_memory; - agp_bridge.unmask_memory = hp_zx1_unmask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; - agp_bridge.free_gatt_table = hp_zx1_free_gatt_table; - agp_bridge.insert_memory = hp_zx1_insert_memory; - agp_bridge.remove_memory = hp_zx1_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.cant_use_aperture = 1; - - return hp_zx1_ioc_init(); - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_HP_ZX1 */ pci_for_each_dev(device) { cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); @@ -3730,7 +3949,6 @@ serverworks_private.svrwrks_dev = pdev; agp_bridge.masks = serverworks_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) serverworks_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -4125,10 +4343,9 @@ return addr & ~(HP_ZX1_PDIR_VALID_BIT); } -static int __init hp_zx1_setup (struct pci_dev *pdev) +static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = hp_zx1_masks; - agp_bridge.num_of_masks = 1; agp_bridge.dev_private_data = NULL; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.needs_scratch_page = FALSE; @@ -4148,11 +4365,11 @@ agp_bridge.free_by_type = 
agp_generic_free_by_type; agp_bridge.agp_alloc_page = agp_generic_alloc_page; agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; agp_bridge.cant_use_aperture = 1; return hp_zx1_ioc_init(); - - (void) pdev; /* unused */ } #endif /* CONFIG_AGP_HP_ZX1 */ @@ -4329,6 +4546,15 @@ #endif /* CONFIG_AGP_INTEL */ +#ifdef CONFIG_AGP_I460 + { PCI_DEVICE_ID_INTEL_460GX, + PCI_VENDOR_ID_INTEL, + INTEL_460GX, + "Intel", + "460GX", + intel_i460_setup }, +#endif + #ifdef CONFIG_AGP_SIS { PCI_DEVICE_ID_SI_740, PCI_VENDOR_ID_SI, @@ -4458,15 +4684,6 @@ hp_zx1_setup }, #endif -#ifdef CONFIG_AGP_HP_ZX1 - { PCI_DEVICE_ID_HP_ZX1_LBA, - PCI_VENDOR_ID_HP, - HP_ZX1, - "HP", - "ZX1", - hp_zx1_setup }, -#endif - { 0, }, /* dummy final entry, always present */ }; @@ -4545,6 +4762,18 @@ return -ENODEV; } +static int agp_check_supported_device(struct pci_dev *dev) { + + int i; + + for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) { + if (dev->vendor == agp_bridge_info[i].vendor_id && + dev->device == agp_bridge_info[i].device_id) + return 1; + } + + return 0; +} /* Supported Device Scanning routine */ @@ -4553,8 +4782,14 @@ struct pci_dev *dev = NULL; u8 cap_ptr = 0x00; - if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) - return -ENODEV; + /* + * Some systems have multiple host bridges (i.e. BigSur), so + * we can't just use the first one we find. 
+ */ + do { + if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev)) == NULL) + return -ENODEV; + } while(!agp_check_supported_device(dev)); agp_bridge.dev = dev; @@ -4690,23 +4925,6 @@ } #endif /* CONFIG_AGP_SWORKS */ - -#ifdef CONFIG_AGP_HP_ZX1 - if (dev->vendor == PCI_VENDOR_ID_HP) { - do { - /* ZX1 LBAs can be either PCI or AGP bridges */ - if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { - printk(KERN_INFO PFX "Detected HP ZX1 AGP " - "chipset at %s\n", dev->slot_name); - agp_bridge.type = HP_ZX1; - agp_bridge.dev = dev; - return hp_zx1_setup(dev); - } - dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev); - } while (dev); - return -ENODEV; - } -#endif /* CONFIG_AGP_HP_ZX1 */ #ifdef CONFIG_AGP_HP_ZX1 if (dev->vendor == PCI_VENDOR_ID_HP) { diff -Nru a/drivers/char/drm/ati_pcigart.h b/drivers/char/drm/ati_pcigart.h --- a/drivers/char/drm/ati_pcigart.h Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/ati_pcigart.h Thu May 30 21:28:59 2002 @@ -30,14 +30,20 @@ #define __NO_VERSION__ #include "drmP.h" -#if PAGE_SIZE == 8192 +#if PAGE_SIZE == 65536 +# define ATI_PCIGART_TABLE_ORDER 0 +# define ATI_PCIGART_TABLE_PAGES (1 << 0) +#elif PAGE_SIZE == 16384 +# define ATI_PCIGART_TABLE_ORDER 1 +# define ATI_PCIGART_TABLE_PAGES (1 << 1) +#elif PAGE_SIZE == 8192 # define ATI_PCIGART_TABLE_ORDER 2 # define ATI_PCIGART_TABLE_PAGES (1 << 2) #elif PAGE_SIZE == 4096 # define ATI_PCIGART_TABLE_ORDER 3 # define ATI_PCIGART_TABLE_PAGES (1 << 3) #else -# error - PAGE_SIZE not 8K or 4K +# error - PAGE_SIZE not 64K, 16K, 8K or 4K #endif # define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */ @@ -103,6 +109,7 @@ goto done; } +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) if ( !dev->pdev ) { DRM_ERROR( "PCI device unknown!\n" ); goto done; @@ -117,6 +124,9 @@ address = 0; goto done; } +#else + bus_address = virt_to_bus( (void *)address ); +#endif pci_gart = (u32 *)address; @@ -126,6 +136,7 @@ memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) ); for ( i 
= 0 ; i < pages ; i++ ) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) /* we need to support large memory configurations */ entry->busaddr[i] = pci_map_single(dev->pdev, page_address( entry->pagelist[i] ), @@ -139,7 +150,9 @@ goto done; } page_base = (u32) entry->busaddr[i]; - +#else + page_base = page_to_bus( entry->pagelist[i] ); +#endif for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { *pci_gart++ = cpu_to_le32( page_base ); page_base += ATI_PCIGART_PAGE_SIZE; @@ -164,6 +177,7 @@ unsigned long addr, dma_addr_t bus_addr) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) drm_sg_mem_t *entry = dev->sg; unsigned long pages; int i; @@ -188,6 +202,8 @@ PAGE_SIZE, PCI_DMA_TODEVICE); } } + +#endif if ( addr ) { DRM(ati_free_pcigart_table)( addr ); diff -Nru a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h --- a/drivers/char/drm/drmP.h Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/drmP.h Thu May 30 21:28:59 2002 @@ -228,16 +228,16 @@ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } /* Mapping helper macros */ -#define DRM_IOREMAP(map) \ - (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) +#define DRM_IOREMAP(map, dev) \ + (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) ) -#define DRM_IOREMAP_NOCACHE(map) \ - (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) +#define DRM_IOREMAP_NOCACHE(map) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size ) -#define DRM_IOREMAPFREE(map) \ - do { \ - if ( (map)->handle && (map)->size ) \ - DRM(ioremapfree)( (map)->handle, (map)->size ); \ +#define DRM_IOREMAPFREE(map, dev) \ + do { \ + if ( (map)->handle && (map)->size ) \ + DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \ } while (0) #define DRM_FIND_MAP(_map, _o) \ @@ -675,9 +675,9 @@ extern unsigned long DRM(alloc_pages)(int order, int area); extern void DRM(free_pages)(unsigned long address, int order, int area); -extern void *DRM(ioremap)(unsigned long offset, 
unsigned long size); +extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev); extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); -extern void DRM(ioremapfree)(void *pt, unsigned long size); +extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev); #if __REALLY_HAVE_AGP extern agp_memory *DRM(alloc_agp)(int pages, u32 type); diff -Nru a/drivers/char/drm/drm_agpsupport.h b/drivers/char/drm/drm_agpsupport.h --- a/drivers/char/drm/drm_agpsupport.h Thu May 30 21:28:58 2002 +++ b/drivers/char/drm/drm_agpsupport.h Thu May 30 21:28:58 2002 @@ -276,6 +276,7 @@ case INTEL_I845: head->chipset = "Intel i845"; break; #endif case INTEL_I850: head->chipset = "Intel i850"; break; + case INTEL_460GX: head->chipset = "Intel 460GX"; break; case VIA_GENERIC: head->chipset = "VIA"; break; case VIA_VP3: head->chipset = "VIA VP3"; break; diff -Nru a/drivers/char/drm/drm_bufs.h b/drivers/char/drm/drm_bufs.h --- a/drivers/char/drm/drm_bufs.h Thu May 30 21:28:58 2002 +++ b/drivers/char/drm/drm_bufs.h Thu May 30 21:28:59 2002 @@ -107,7 +107,7 @@ switch ( map->type ) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) if ( map->offset + map->size < map->offset || map->offset < virt_to_phys(high_memory) ) { DRM(free)( map, sizeof(*map), DRM_MEM_MAPS ); @@ -124,7 +124,7 @@ MTRR_TYPE_WRCOMB, 1 ); } #endif - map->handle = DRM(ioremap)( map->offset, map->size ); + map->handle = DRM(ioremap)( map->offset, map->size, dev ); break; case _DRM_SHM: @@ -245,7 +245,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); diff -Nru a/drivers/char/drm/drm_drv.h b/drivers/char/drm/drm_drv.h --- a/drivers/char/drm/drm_drv.h Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/drm_drv.h Thu May 
30 21:28:59 2002 @@ -423,7 +423,7 @@ DRM_DEBUG( "mtrr_del=%d\n", retcode ); } #endif - DRM(ioremapfree)( map->handle, map->size ); + DRM(ioremapfree)( map->handle, map->size, dev ); break; case _DRM_SHM: vfree(map->handle); diff -Nru a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h --- a/drivers/char/drm/drm_memory.h Thu May 30 21:28:58 2002 +++ b/drivers/char/drm/drm_memory.h Thu May 30 21:28:58 2002 @@ -291,9 +291,14 @@ } } -void *DRM(ioremap)(unsigned long offset, unsigned long size) +void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) { void *pt; +#if __REALLY_HAVE_AGP + drm_map_t *map = NULL; + drm_map_list_t *r_list; + struct list_head *list; +#endif if (!size) { DRM_MEM_ERROR(DRM_MEM_MAPPINGS, @@ -301,12 +306,51 @@ return NULL; } +#if __REALLY_HAVE_AGP + if(!dev->agp || dev->agp->cant_use_aperture == 0) + goto standard_ioremap; + + list_for_each(list, &dev->maplist->head) { + r_list = (drm_map_list_t *)list; + map = r_list->map; + if (!map) continue; + if (map->offset <= offset && + (map->offset + map->size) >= (offset + size)) + break; + } + + if(map && map->type == _DRM_AGP) { + struct drm_agp_mem *agpmem; + + for(agpmem = dev->agp->memory; agpmem; + agpmem = agpmem->next) { + if(agpmem->bound <= offset && + (agpmem->bound + (agpmem->pages + << PAGE_SHIFT)) >= (offset + size)) + break; + } + + if(agpmem == NULL) + goto ioremap_failure; + + pt = agpmem->memory->vmptr + (offset - agpmem->bound); + goto ioremap_success; + } + +standard_ioremap: +#endif if (!(pt = ioremap(offset, size))) { +#if __REALLY_HAVE_AGP +ioremap_failure: +#endif spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); return NULL; } +#if __REALLY_HAVE_AGP +ioremap_success: +#endif spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; @@ -337,7 +381,7 @@ return pt; } -void DRM(ioremapfree)(void *pt, unsigned long size) 
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) { int alloc_count; int free_count; @@ -345,7 +389,11 @@ if (!pt) DRM_MEM_ERROR(DRM_MEM_MAPPINGS, "Attempt to free NULL pointer\n"); +#if __REALLY_HAVE_AGP + else if(!dev->agp || dev->agp->cant_use_aperture == 0) +#else else +#endif iounmap(pt); spin_lock(&DRM(mem_lock)); diff -Nru a/drivers/char/drm/drm_scatter.h b/drivers/char/drm/drm_scatter.h --- a/drivers/char/drm/drm_scatter.h Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/drm_scatter.h Thu May 30 21:28:59 2002 @@ -47,9 +47,11 @@ vfree( entry->virtual ); +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) DRM(free)( entry->busaddr, entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); +#endif DRM(free)( entry->pagelist, entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES ); @@ -96,6 +98,7 @@ memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist)); +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); if ( !entry->busaddr ) { @@ -108,12 +111,15 @@ return -ENOMEM; } memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) ); +#endif entry->virtual = vmalloc_32( pages << PAGE_SHIFT ); if ( !entry->virtual ) { +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) DRM(free)( entry->busaddr, entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES ); +#endif DRM(free)( entry->pagelist, entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES ); diff -Nru a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h --- a/drivers/char/drm/drm_vm.h Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/drm_vm.h Thu May 30 21:28:59 2002 @@ -71,7 +71,7 @@ * Find the right map */ - if(!dev->agp->cant_use_aperture) goto vm_nopage_error; + if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error; list_for_each(list, &dev->maplist->head) { r_list = (drm_map_list_t *)list; @@ -207,7 +207,7 @@ DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - 
DRM(ioremapfree)(map->handle, map->size); + DRM(ioremapfree)(map->handle, map->size, dev); break; case _DRM_SHM: vfree(map->handle); @@ -421,15 +421,17 @@ switch (map->type) { case _DRM_AGP: -#if defined(__alpha__) - /* - * On Alpha we can't talk to bus dma address from the - * CPU, so for memory of type DRM_AGP, we'll deal with - * sorting out the real physical pages and mappings - * in nopage() - */ - vma->vm_ops = &DRM(vm_ops); - break; +#if __REALLY_HAVE_AGP + if(dev->agp->cant_use_aperture == 1) { + /* + * On some systems we can't talk to bus dma address from + * the CPU, so for memory of type DRM_AGP, we'll deal + * with sorting out the real physical pages and mappings + * in nopage() + */ + vma->vm_ops = &DRM(vm_ops); + goto mapswitch_out; + } #endif /* fall through to _DRM_FRAME_BUFFER... */ case _DRM_FRAME_BUFFER: @@ -491,6 +493,9 @@ default: return -EINVAL; /* This should never happen. */ } +#if __REALLY_HAVE_AGP +mapswitch_out: +#endif #if LINUX_VERSION_CODE <= 0x020414 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ #else diff -Nru a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c --- a/drivers/char/drm/i810_dma.c Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/i810_dma.c Thu May 30 21:28:59 2002 @@ -307,7 +307,7 @@ if(dev_priv->ring.virtual_start) { DRM(ioremapfree)((void *) dev_priv->ring.virtual_start, - dev_priv->ring.Size); + dev_priv->ring.Size, dev); } if(dev_priv->hw_status_page != 0UL) { i810_free_page(dev, dev_priv->hw_status_page); @@ -321,7 +321,8 @@ for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[ i ]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total); + DRM(ioremapfree)(buf_priv->kernel_virtual, + buf->total, dev); } } return 0; @@ -393,7 +394,7 @@ *buf_priv->in_use = I810_BUF_FREE; buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address, - buf->total); + buf->total, dev); } return 0; } @@ -446,7 +447,7 @@ dev_priv->ring.virtual_start = 
DRM(ioremap)(dev->agp->base + init->ring_start, - init->ring_size); + init->ring_size, dev); if (dev_priv->ring.virtual_start == NULL) { dev->dev_private = (void *) dev_priv; diff -Nru a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c --- a/drivers/char/drm/mga_dma.c Thu May 30 21:28:59 2002 +++ b/drivers/char/drm/mga_dma.c Thu May 30 21:28:59 2002 @@ -557,9 +557,9 @@ (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + init->sarea_priv_offset); - DRM_IOREMAP( dev_priv->warp ); - DRM_IOREMAP( dev_priv->primary ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->warp, dev ); + DRM_IOREMAP( dev_priv->primary, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->warp->handle || !dev_priv->primary->handle || @@ -647,9 +647,9 @@ if ( dev->dev_private ) { drm_mga_private_t *dev_priv = dev->dev_private; - DRM_IOREMAPFREE( dev_priv->warp ); - DRM_IOREMAPFREE( dev_priv->primary ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->warp, dev ); + DRM_IOREMAPFREE( dev_priv->primary, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); if ( dev_priv->head != NULL ) { mga_freelist_cleanup( dev ); diff -Nru a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c --- a/drivers/char/drm/r128_cce.c Thu May 30 21:28:58 2002 +++ b/drivers/char/drm/r128_cce.c Thu May 30 21:28:58 2002 @@ -216,7 +216,23 @@ int i; for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { +#ifndef CONFIG_AGP_I460 if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail ) { +#else + /* + * XXX - this is (I think) a 460GX specific hack + * + * When doing texturing, ring.tail sometimes gets ahead of + * PM4_BUFFER_DL_WPTR by 2; consequently, the card processes + * its whole quota of instructions and *ring.head is still 2 + * short of ring.tail. Work around this for now in lieu of + * a better solution. 
+ */ + if ( GET_RING_HEAD( &dev_priv->ring ) == dev_priv->ring.tail + || ( dev->agp->agp_info.chipset == INTEL_460GX + && ( dev_priv->ring.tail - GET_RING_HEAD( &dev_priv->ring ) ) == 2 ) ) + { +#endif int pm4stat = R128_READ( R128_PM4_STAT ); if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size ) && @@ -317,7 +333,7 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev, drm_r128_private_t *dev_priv ) { - u32 ring_start; + u32 ring_start, rptr_addr; u32 tmp; DRM_DEBUG( "%s\n", __FUNCTION__ ); @@ -341,8 +357,26 @@ SET_RING_HEAD( &dev_priv->ring, 0 ); if ( !dev_priv->is_pci ) { - R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, - dev_priv->ring_rptr->offset ); +#ifdef CONFIG_AGP_I460 + /* + * XXX - This is a 460GX specific hack + * + * We have to hack this right now. 460GX isn't claiming PCI + * writes from the card into the AGP aperture. Because of this, + * we have to get space outside of the aperture for RPTR_ADDR. + */ + if (dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off; + + alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA); + + dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off; + SET_RING_HEAD( &dev_priv->ring, 0 ); + rptr_addr = __pa( dev_priv->ring.head ); + } else +#endif + rptr_addr = dev_priv->ring_rptr->offset; + R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, rptr_addr ); } else { drm_sg_mem_t *entry = dev->sg; unsigned long tmp_ofs, page_ofs; @@ -350,11 +384,20 @@ tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle; page_ofs = tmp_ofs >> PAGE_SHIFT; +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, entry->busaddr[page_ofs]); DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", entry->busaddr[page_ofs], entry->handle + tmp_ofs ); +#else + R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR, + page_to_bus(entry->pagelist[page_ofs])); + + DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n", + page_to_bus(entry->pagelist[page_ofs]), + entry->handle + tmp_ofs ); 
+#endif } /* Set watermark control */ @@ -550,9 +593,9 @@ init->sarea_priv_offset); if ( !dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cce_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cce_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -624,9 +667,9 @@ drm_r128_private_t *dev_priv = dev->dev_private; if ( !dev_priv->is_pci ) { - DRM_IOREMAPFREE( dev_priv->cce_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cce_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); } else { if (!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, @@ -634,6 +677,18 @@ DRM_ERROR( "failed to cleanup PCI GART!\n" ); } +#if defined(CONFIG_AGP_I460) && defined(__ia64__) + /* + * Free the page we grabbed for RPTR_ADDR + */ + if( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off = + (unsigned long) dev_priv->ring.head; + + free_page(alt_rh_off); + } +#endif + DRM(free)( dev->dev_private, sizeof(drm_r128_private_t), DRM_MEM_DRIVER ); dev->dev_private = NULL; diff -Nru a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c --- a/drivers/char/drm/radeon_cp.c Thu May 30 21:28:58 2002 +++ b/drivers/char/drm/radeon_cp.c Thu May 30 21:28:58 2002 @@ -575,7 +575,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, drm_radeon_private_t *dev_priv ) { - u32 ring_start, cur_read_ptr; + u32 ring_start, cur_read_ptr, rptr_addr; u32 tmp; /* Initialize the memory controller */ @@ -612,8 +612,26 @@ dev_priv->ring.tail = cur_read_ptr; if ( !dev_priv->is_pci ) { - RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, - dev_priv->ring_rptr->offset ); +#ifdef CONFIG_AGP_I460 + /* + * XXX - This is a 460GX specific hack + * + * We have to 
hack this right now. 460GX isn't claiming PCI + * writes from the card into the AGP aperture. Because of this, + * we have to get space outside of the aperture for RPTR_ADDR. + */ + if( dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off; + + alt_rh_off = __get_free_page(GFP_KERNEL | GFP_DMA); + + dev_priv->ring.head = (__volatile__ u32 *) alt_rh_off; + *dev_priv->ring.head = cur_read_ptr; + rptr_addr = __pa( dev_priv->ring.head ); + } else +#endif + rptr_addr = dev_priv->ring_rptr->offset; + RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, rptr_addr ); } else { drm_sg_mem_t *entry = dev->sg; unsigned long tmp_ofs, page_ofs; @@ -621,11 +639,19 @@ tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle; page_ofs = tmp_ofs >> PAGE_SHIFT; +#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400) RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", entry->busaddr[page_ofs], entry->handle + tmp_ofs ); +#else + RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, + entry->busaddr[page_ofs]); + DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n", + entry->busaddr[page_ofs], + entry->handle + tmp_ofs ); +#endif } /* Set ring buffer size */ @@ -836,9 +862,9 @@ init->sarea_priv_offset); if ( !dev_priv->is_pci ) { - DRM_IOREMAP( dev_priv->cp_ring ); - DRM_IOREMAP( dev_priv->ring_rptr ); - DRM_IOREMAP( dev_priv->buffers ); + DRM_IOREMAP( dev_priv->cp_ring, dev ); + DRM_IOREMAP( dev_priv->ring_rptr, dev ); + DRM_IOREMAP( dev_priv->buffers, dev ); if(!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || !dev_priv->buffers->handle) { @@ -981,9 +1007,9 @@ drm_radeon_private_t *dev_priv = dev->dev_private; if ( !dev_priv->is_pci ) { - DRM_IOREMAPFREE( dev_priv->cp_ring ); - DRM_IOREMAPFREE( dev_priv->ring_rptr ); - DRM_IOREMAPFREE( dev_priv->buffers ); + DRM_IOREMAPFREE( dev_priv->cp_ring, dev ); + DRM_IOREMAPFREE( dev_priv->ring_rptr, dev ); + DRM_IOREMAPFREE( dev_priv->buffers, dev ); } else { if 
(!DRM(ati_pcigart_cleanup)( dev, dev_priv->phys_pci_gart, @@ -991,8 +1017,17 @@ DRM_ERROR( "failed to cleanup PCI GART!\n" ); } - DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), - DRM_MEM_DRIVER ); +#ifdef CONFIG_AGP_I460 + /* + * Free the page we grabbed for RPTR_ADDR + */ + if( !dev_priv->is_pci && dev->agp->agp_info.chipset == INTEL_460GX ) { + unsigned long alt_rh_off = (unsigned long) dev_priv->ring.head; + + free_page(alt_rh_off); + } +#endif + DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t), DRM_MEM_DRIVER ); dev->dev_private = NULL; } diff -Nru a/drivers/char/mem.c b/drivers/char/mem.c --- a/drivers/char/mem.c Thu May 30 21:28:59 2002 +++ b/drivers/char/mem.c Thu May 30 21:28:59 2002 @@ -518,6 +518,7 @@ default: ret = -EINVAL; } + force_successful_syscall_return(); unlock_kernel(); return ret; } diff -Nru a/drivers/char/pc_keyb.c b/drivers/char/pc_keyb.c --- a/drivers/char/pc_keyb.c Thu May 30 21:28:59 2002 +++ b/drivers/char/pc_keyb.c Thu May 30 21:28:59 2002 @@ -808,6 +808,17 @@ { int status; +#ifdef CONFIG_IA64 + /* + * This is not really IA-64 specific. Probably ought to be done on all platforms + * that are (potentially) legacy-free. + */ + if (kbd_read_status() == 0xff && kbd_read_input() == 0xff) { + kbd_exists = 0; + return "No keyboard controller preset"; + } +#endif + /* * Test the keyboard interface. * This seems to be the only way to get it going. 
@@ -910,6 +921,10 @@ char *msg = initialize_kbd(); if (msg) printk(KERN_WARNING "initialize_kbd: %s\n", msg); +#ifdef CONFIG_IA64 + if (!kbd_exists) + return; +#endif } #if defined CONFIG_PSMOUSE diff -Nru a/drivers/ide/ide-geometry.c b/drivers/ide/ide-geometry.c --- a/drivers/ide/ide-geometry.c Thu May 30 21:28:58 2002 +++ b/drivers/ide/ide-geometry.c Thu May 30 21:28:58 2002 @@ -7,8 +7,11 @@ #include #include #include -#include #include + +#ifndef CONFIG_IA64 +# include +#endif #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) diff -Nru a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile --- a/drivers/media/radio/Makefile Thu May 30 21:28:58 2002 +++ b/drivers/media/radio/Makefile Thu May 30 21:28:58 2002 @@ -4,7 +4,7 @@ # Object file lists. -obj-y := +obj-y := dummy.o obj-m := obj-n := obj- := diff -Nru a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/radio/dummy.c Thu May 30 21:28:59 2002 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's deadling with... */ diff -Nru a/drivers/media/video/Makefile b/drivers/media/video/Makefile --- a/drivers/media/video/Makefile Thu May 30 21:28:59 2002 +++ b/drivers/media/video/Makefile Thu May 30 21:28:59 2002 @@ -4,7 +4,7 @@ # Object file lists. -obj-y := +obj-y := dummy.o obj-m := obj-n := obj- := diff -Nru a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/media/video/dummy.c Thu May 30 21:28:59 2002 @@ -0,0 +1 @@ +/* just so the linker knows what kind of object files it's deadling with... */ diff -Nru a/drivers/message/fusion/isense.c b/drivers/message/fusion/isense.c --- a/drivers/message/fusion/isense.c Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/isense.c Thu May 30 21:28:59 2002 @@ -5,12 +5,13 @@ * Error Report logging output. This module implements SCSI-3 * Opcode lookup and a sorted table of SCSI-3 ASC/ASCQ strings. 
* - * Copyright (c) 1991-2001 Steven J. Ralston + * Copyright (c) 1991-2002 Steven J. Ralston * Written By: Steven J. Ralston * (yes I wrote some of the orig. code back in 1991!) - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: isense.c,v 1.28.14.1 2001/08/24 20:07:04 sralston Exp $ + * $Id: isense.c,v 1.33 2002/02/27 18:44:19 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -49,11 +50,15 @@ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -#include +#include #include +#include #include #include -#include +#include +#if defined (__sparc__) +#include +#endif /* Hmmm, avoid undefined spinlock_t on lk-2.2.14-5.0 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) @@ -61,7 +66,7 @@ #endif #define MODULEAUTHOR "Steven J. Ralston" -#define COPYRIGHT "Copyright (c) 2001 " MODULEAUTHOR +#define COPYRIGHT "Copyright (c) 2001-2002 " MODULEAUTHOR #include "mptbase.h" #include "isense.h" @@ -86,7 +91,6 @@ MODULE_AUTHOR(MODULEAUTHOR); MODULE_DESCRIPTION(my_NAME); -MODULE_LICENSE("GPL"); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ int __init isense_init(void) diff -Nru a/drivers/message/fusion/lsi/fc_log.h b/drivers/message/fusion/lsi/fc_log.h --- a/drivers/message/fusion/lsi/fc_log.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/fc_log.h Thu May 30 21:28:58 2002 @@ -7,7 +7,7 @@ * in the IOCLogInfo field of a MPI Default Reply Message. 
* * CREATION DATE: 6/02/2000 - * ID: $Id: fc_log.h,v 4.5 2001/06/07 19:18:00 sschremm Exp $ + * ID: $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $ */ @@ -62,7 +62,7 @@ MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP = 0x2100000a, /* Manual Response not sent due to a LIP */ MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3 = 0x2100000b, /* not sent because remote node does not support Class 3 */ MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID = 0x2100000c, /* not sent because login to remote node not validated */ - MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound after a logout */ + MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound queue after a logout */ MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN = 0x2100000f, /* cleared waiting for data after a logout */ MPI_IOCLOGINFO_FC_LAN_BASE = 0x22000000, diff -Nru a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h --- a/drivers/message/fusion/lsi/mpi.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI Message independent structures and definitions * Creation Date: July 27, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.03 * * Version History * --------------- @@ -39,6 +39,11 @@ * Added function codes for RAID. * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE, * MPI_DOORBELL_USED, to better match the spec. + * 08-08-01 01.02.01 Original release for v1.2 work. + * Changed MPI_VERSION_MINOR from 0x01 to 0x02. + * Added define MPI_FUNCTION_TOOLBOX. + * 09-28-01 01.02.02 New function code MPI_SCSI_ENCLOSURE_PROCESSOR. + * 11-01-01 01.02.03 Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR. 
* -------------------------------------------------------------------------- */ @@ -53,7 +58,7 @@ *****************************************************************************/ #define MPI_VERSION_MAJOR (0x01) -#define MPI_VERSION_MINOR (0x01) +#define MPI_VERSION_MINOR (0x02) #define MPI_VERSION ((MPI_VERSION_MAJOR << 8) | MPI_VERSION_MINOR) /* Note: The major versions of 0xe0 through 0xff are reserved */ @@ -216,8 +221,12 @@ #define MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND (0x13) #define MPI_FUNCTION_FC_PRIMITIVE_SEND (0x14) -#define MPI_FUNCTION_RAID_VOLUME (0x15) +#define MPI_FUNCTION_RAID_ACTION (0x15) #define MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) + +#define MPI_FUNCTION_TOOLBOX (0x17) + +#define MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) #define MPI_FUNCTION_LAN_SEND (0x20) #define MPI_FUNCTION_LAN_RECEIVE (0x21) diff -Nru a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h --- a/drivers/message/fusion/lsi/mpi_cnfg.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi_cnfg.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI Config message, structures, and Pages * Creation Date: July 27, 2000 * - * MPI Version: 01.01.11 + * MPI Version: 01.02.05 * * Version History * --------------- @@ -72,6 +72,42 @@ * Added IO Unit Page 3. * Modified defines for Scsi Port Page 2. * Modified RAID Volume Pages. + * 08-08-01 01.02.01 Original release for v1.2 work. + * Added SepID and SepBus to RVP2 IMPhysicalDisk struct. + * Added defines for the SEP bits in RVP2 VolumeSettings. + * Modified the DeviceSettings field in RVP2 to use the + * proper structure. + * Added defines for SES, SAF-TE, and cross channel for + * IOCPage2 CapabilitiesFlags. + * Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE. + * Removed define for + * MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE. + * Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT. + * 08-29-01 01.02.02 Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035. 
+ * Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY + * and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY. + * Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS, + * MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS, and + * MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and + * MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED. + * Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED + * and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED. + * Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1. + * Added rejected bits to SCSI Device Page 0 Information. + * Increased size of ALPA array in FC Port Page 2 by one + * and removed a one byte reserved field. + * 09-28-01 01.02.03 Swapped NegWireSpeedLow and NegWireSpeedLow in + * CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering. + * Added structures for Manufacturing Page 4, IO Unit + * Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and + * RAID PhysDisk Page 0. + * 10-04-01 01.02.04 Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK. + * Modified some of the new defines to make them 32 + * character unique. + * Modified how variable length pages (arrays) are defined. + * Added generic defines for hot spare pools and RAID + * volume types. + * 11-01-01 01.02.05 Added define for MPI_IOUNITPAGE1_DISABLE_IR. 
* -------------------------------------------------------------------------- */ @@ -104,12 +140,13 @@ fCONFIG_PAGE_HEADER_UNION, MPI_POINTER PTR_CONFIG_PAGE_HEADER_UNION; -/****************************************************************************/ -/* PageType field values */ -/****************************************************************************/ +/**************************************************************************** +* PageType field values +****************************************************************************/ #define MPI_CONFIG_PAGEATTR_READ_ONLY (0x00) #define MPI_CONFIG_PAGEATTR_CHANGEABLE (0x10) #define MPI_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI_CONFIG_PAGEATTR_RO_PERSISTENT (0x30) #define MPI_CONFIG_PAGEATTR_MASK (0xF0) #define MPI_CONFIG_PAGETYPE_IO_UNIT (0x00) @@ -122,29 +159,21 @@ #define MPI_CONFIG_PAGETYPE_LAN (0x07) #define MPI_CONFIG_PAGETYPE_RAID_VOLUME (0x08) #define MPI_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) #define MPI_CONFIG_PAGETYPE_MASK (0x0F) #define MPI_CONFIG_TYPENUM_MASK (0x0FFF) /**************************************************************************** - * PageAddres field values - ****************************************************************************/ +* PageAddress field values +****************************************************************************/ #define MPI_SCSI_PORT_PGAD_PORT_MASK (0x000000FF) -#define MPI_SCSI_DEVICE_FORM_MASK (0xF0000000) -#define MPI_SCSI_DEVICE_FORM_TARGETID (0x00000000) -#define MPI_SCSI_DEVICE_FORM_RAID_PHYS_DEV_NUM (0x10000000) #define MPI_SCSI_DEVICE_TARGET_ID_MASK (0x000000FF) #define MPI_SCSI_DEVICE_TARGET_ID_SHIFT (0) #define MPI_SCSI_DEVICE_BUS_MASK (0x0000FF00) #define MPI_SCSI_DEVICE_BUS_SHIFT (8) -#define MPI_SCSI_DEVICE_VOLUME_TARG_ID_MASK (0x000000FF) -#define MPI_SCSI_DEVICE_VOLUME_TARG_ID_SHIFT (0) -#define MPI_SCSI_DEVICE_VOLUME_BUS_MASK (0x0000FF00) -#define MPI_SCSI_DEVICE_VOLUME_BUS_SHIFT (8) 
-#define MPI_SCSI_DEVICE_PHYS_DISK_NUM_MASK (0x00FF0000) -#define MPI_SCSI_DEVICE_PHYS_DISK_NUM_SHIFT (16) #define MPI_FC_PORT_PGAD_PORT_MASK (0xF0000000) #define MPI_FC_PORT_PGAD_PORT_SHIFT (28) @@ -167,10 +196,14 @@ #define MPI_FC_DEVICE_PGAD_BT_TID_MASK (0x000000FF) #define MPI_FC_DEVICE_PGAD_BT_TID_SHIFT (0) +#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT (0) + -/****************************************************************************/ -/* Config Request Message */ -/****************************************************************************/ + +/**************************************************************************** +* Config Request Message +****************************************************************************/ typedef struct _MSG_CONFIG { U8 Action; /* 00h */ @@ -181,16 +214,16 @@ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U8 Reserved2[8]; /* 0Ch */ - fCONFIG_PAGE_HEADER Header; /* 14h */ + fCONFIG_PAGE_HEADER Header; /* 14h */ U32 PageAddress; /* 18h */ SGE_IO_UNION PageBufferSGE; /* 1Ch */ } MSG_CONFIG, MPI_POINTER PTR_MSG_CONFIG, Config_t, MPI_POINTER pConfig_t; -/****************************************************************************/ -/* Action field values */ -/****************************************************************************/ +/**************************************************************************** +* Action field values +****************************************************************************/ #define MPI_CONFIG_ACTION_PAGE_HEADER (0x00) #define MPI_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) #define MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) @@ -213,7 +246,7 @@ U8 Reserved2[2]; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ - fCONFIG_PAGE_HEADER Header; /* 14h */ + fCONFIG_PAGE_HEADER Header; /* 14h */ } MSG_CONFIG_REPLY, MPI_POINTER PTR_MSG_CONFIG_REPLY, ConfigReply_t, MPI_POINTER pConfigReply_t; @@ -225,19 +258,24 @@ * 
*****************************************************************************/ -/****************************************************************************/ -/* Manufacturing Config pages */ -/****************************************************************************/ +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ #define MPI_MANUFACTPAGE_DEVICEID_FC909 (0x0621) #define MPI_MANUFACTPAGE_DEVICEID_FC919 (0x0624) #define MPI_MANUFACTPAGE_DEVICEID_FC929 (0x0622) +#define MPI_MANUFACTPAGE_DEVICEID_FC919X (0x0628) +#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626) #define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030) #define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031) -#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0035) +#define MPI_MANUFACTPAGE_DEVID_1030_53C1035 (0x0032) +#define MPI_MANUFACTPAGE_DEVID_1030ZC_53C1035 (0x0033) +#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0040) +#define MPI_MANUFACTPAGE_DEVID_53C1035ZC (0x0041) typedef struct _CONFIG_PAGE_MANUFACTURING_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 ChipName[16]; /* 04h */ U8 ChipRevision[8]; /* 14h */ U8 BoardName[16]; /* 1Ch */ @@ -252,7 +290,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 VPD[256]; /* 04h */ } fCONFIG_PAGE_MANUFACTURING_1, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_1, ManufacturingPage1_t, MPI_POINTER pManufacturingPage1_t; @@ -269,35 +307,72 @@ MpiChipRevisionId_t, MPI_POINTER pMpiChipRevisionId_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + typedef struct _CONFIG_PAGE_MANUFACTURING_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - MPI_CHIP_REVISION_ID ChipId; /* 04h */ - U32 HwSettings[1]; /* 08h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + MPI_CHIP_REVISION_ID ChipId; /* 04h */ + U32 HwSettings[MPI_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 08h */ } fCONFIG_PAGE_MANUFACTURING_2, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_2, ManufacturingPage2_t, MPI_POINTER pManufacturingPage2_t; #define MPI_MANUFACTURING2_PAGEVERSION (0x00) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_MAN_PAGE_3_INFO_WORDS +#define MPI_MAN_PAGE_3_INFO_WORDS (1) +#endif + typedef struct _CONFIG_PAGE_MANUFACTURING_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - MPI_CHIP_REVISION_ID ChipId; /* 04h */ - U32 Info[1]; /* 08h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + MPI_CHIP_REVISION_ID ChipId; /* 04h */ + U32 Info[MPI_MAN_PAGE_3_INFO_WORDS];/* 08h */ } fCONFIG_PAGE_MANUFACTURING_3, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_3, ManufacturingPage3_t, MPI_POINTER pManufacturingPage3_t; #define MPI_MANUFACTURING3_PAGEVERSION (0x00) -/****************************************************************************/ -/* IO Unit Config Pages */ -/****************************************************************************/ +typedef struct _CONFIG_PAGE_MANUFACTURING_4 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U32 Reserved1; /* 04h */ + U8 InfoOffset0; /* 08h */ + U8 InfoSize0; /* 09h */ + U8 InfoOffset1; /* 0Ah */ + U8 InfoSize1; /* 0Bh */ + U8 InquirySize; /* 0Ch */ + U8 Reserved2; /* 0Dh */ + U16 Reserved3; /* 0Eh */ + U8 InquiryData[56]; /* 10h */ + U32 ISVolumeSettings; /* 48h */ + U32 IMEVolumeSettings; /* 4Ch */ + U32 IMVolumeSettings; /* 50h */ +} fCONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4, + ManufacturingPage4_t, 
MPI_POINTER pManufacturingPage4_t; + +#define MPI_MANUFACTURING4_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_IO_UNIT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U64 UniqueValue; /* 04h */ } fCONFIG_PAGE_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_0, IOUnitPage0_t, MPI_POINTER pIOUnitPage0_t; @@ -307,18 +382,20 @@ typedef struct _CONFIG_PAGE_IO_UNIT_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ } fCONFIG_PAGE_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_1, IOUnitPage1_t, MPI_POINTER pIOUnitPage1_t; #define MPI_IOUNITPAGE1_PAGEVERSION (0x00) +/* IO Unit Page 1 Flags defines */ + #define MPI_IOUNITPAGE1_MULTI_FUNCTION (0x00000000) #define MPI_IOUNITPAGE1_SINGLE_FUNCTION (0x00000001) #define MPI_IOUNITPAGE1_MULTI_PATHING (0x00000002) #define MPI_IOUNITPAGE1_SINGLE_PATHING (0x00000000) - +#define MPI_IOUNITPAGE1_DISABLE_IR (0x00000040) #define MPI_IOUNITPAGE1_FORCE_32 (0x00000080) @@ -335,7 +412,7 @@ typedef struct _CONFIG_PAGE_IO_UNIT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U32 BiosVersion; /* 08h */ MPI_ADAPTER_INFO AdapterOrder[4]; /* 0Ch */ @@ -344,38 +421,45 @@ #define MPI_IOUNITPAGE2_PAGEVERSION (0x00) -#define MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE (0x00000001) #define MPI_IOUNITPAGE2_FLAGS_PAUSE_ON_ERROR (0x00000002) #define MPI_IOUNITPAGE2_FLAGS_VERBOSE_ENABLE (0x00000004) #define MPI_IOUNITPAGE2_FLAGS_COLOR_VIDEO_DISABLE (0x00000008) #define MPI_IOUNITPAGE2_FLAGS_DONT_HOOK_INT_40 (0x00000010) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#endif + typedef struct _CONFIG_PAGE_IO_UNIT_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 VolumeSettings; /* 04h */ - U8 InfoOffset0; /* 08h */ - U8 InfoSize0; /* 09h */ - U8 InfoOffset1; /* 0Ah */ - U8 InfoSize1; /* 0Bh */ - U8 InquirySize; /* 0Ch */ - U8 Reserved; /* 0Dh */ - U16 Reserved2; /* 0Eh */ - U8 InquiryData[56]; /* 10h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 GPIOCount; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ + U16 GPIOVal[MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX]; /* 08h */ } fCONFIG_PAGE_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_3, IOUnitPage3_t, MPI_POINTER pIOUnitPage3_t; -#define MPI_IOUNITPAGE3_PAGEVERSION (0x00) +#define MPI_IOUNITPAGE3_PAGEVERSION (0x01) + +#define MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFC) +#define MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI_IOUNITPAGE3_GPIO_SETTING_OFF (0x00) +#define MPI_IOUNITPAGE3_GPIO_SETTING_ON (0x01) -/****************************************************************************/ -/* IOC Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_IOC_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 TotalNVStore; /* 04h */ U32 FreeNVStore; /* 08h */ U16 VendorID; /* 0Ch */ @@ -393,7 +477,7 @@ typedef struct _CONFIG_PAGE_IOC_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U32 CoalescingTimeout; /* 08h */ U8 CoalescingDepth; /* 0Ch */ @@ -408,53 +492,120 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL { - U8 VolumeTargetID; /* 00h */ - U8 VolumeBus; /* 01h */ - U16 Reserved; /* 02h */ - U8 VolumeVersionMinor; /* 04h */ - U8 VolumeVersionMajor; /* 05h 
*/ - U8 VolumeRaidType; /* 06h */ - U8 Reserved1; /* 07h */ + U8 VolumeID; /* 00h */ + U8 VolumeBus; /* 01h */ + U8 VolumeIOC; /* 02h */ + U8 VolumePageNumber; /* 03h */ + U8 VolumeType; /* 04h */ + U8 Reserved2; /* 05h */ + U16 Reserved3; /* 06h */ } fCONFIG_PAGE_IOC_2_RAID_VOL, MPI_POINTER PTR_CONFIG_PAGE_IOC_2_RAID_VOL, ConfigPageIoc2RaidVol_t, MPI_POINTER pConfigPageIoc2RaidVol_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX +#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1) +#endif + typedef struct _CONFIG_PAGE_IOC_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 CapabilitiesFlags; /* 04h */ - U8 NumActiveVolumes; /* 08h */ - U8 MaxVolumes; /* 09h */ - U16 Reserved; /* 0Ah */ - fCONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[1]; /* 0Ch */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U32 CapabilitiesFlags; /* 04h */ + U8 NumActiveVolumes; /* 08h */ + U8 MaxVolumes; /* 09h */ + U8 NumActivePhysDisks; /* 0Ah */ + U8 MaxPhysDisks; /* 0Bh */ + fCONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */ } fCONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2, IOCPage2_t, MPI_POINTER pIOCPage2_t; -#define MPI_IOCPAGE2_PAGEVERSION (0x00) +#define MPI_IOCPAGE2_PAGEVERSION (0x01) /* IOC Page 2 Capabilities flags */ -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_0_SUPPORT (0x00000001) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_1_SUPPORT (0x00000002) -#define MPI_IOCPAGE2_CAP_FLAGS_LSI_MIRROR_SUPPORT (0x00000004) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008) -#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000010) - -/* IOC Page 2 Volume RAID Type values */ - -#define MPI_IOCPAGE2_VOL_TYPE_RAID_0 (0x00) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_1 (0x01) -#define MPI_IOCPAGE2_VOL_TYPE_LSI_MIRROR (0x02) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_5 (0x05) -#define MPI_IOCPAGE2_VOL_TYPE_RAID_10 (0x0A) - - 
-/****************************************************************************/ -/* SCSI Port Config Pages */ -/****************************************************************************/ +#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001) +#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002) +#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004) +#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000) +#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000) +#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000) + +/* IOC Page 2 Volume RAID Type values, also used in RAID Volume pages */ + +#define MPI_RAID_VOL_TYPE_IS (0x00) +#define MPI_RAID_VOL_TYPE_IME (0x01) +#define MPI_RAID_VOL_TYPE_IM (0x02) + + +typedef struct _IOC_3_PHYS_DISK +{ + U8 PhysDiskID; /* 00h */ + U8 PhysDiskBus; /* 01h */ + U8 PhysDiskIOC; /* 02h */ + U8 PhysDiskNum; /* 03h */ +} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK, + Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX +#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_IOC_3 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 NumPhysDisks; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 06h */ + IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */ +} fCONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3, + IOCPage3_t, MPI_POINTER pIOCPage3_t; + +#define MPI_IOCPAGE3_PAGEVERSION (0x00) + + +typedef struct _IOC_4_SEP +{ + U8 SEPTargetID; /* 00h */ + U8 SEPBus; /* 01h */ + U16 Reserved; /* 02h */ +} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP, + Ioc4Sep_t, MPI_POINTER pIoc4Sep_t; + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_IOC_PAGE_4_SEP_MAX +#define MPI_IOC_PAGE_4_SEP_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_IOC_4 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 ActiveSEP; /* 04h */ + U8 MaxSEP; /* 05h */ + U16 Reserved1; /* 06h */ + IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */ +} fCONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4, + IOCPage4_t, MPI_POINTER pIOCPage4_t; + +#define MPI_IOCPAGE4_PAGEVERSION (0x00) + + +/**************************************************************************** +* SCSI Port Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_SCSI_PORT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Capabilities; /* 04h */ U32 PhysicalInterface; /* 08h */ } fCONFIG_PAGE_SCSI_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_0, @@ -465,7 +616,6 @@ #define MPI_SCSIPORTPAGE0_CAP_IU (0x00000001) #define MPI_SCSIPORTPAGE0_CAP_DT (0x00000002) #define MPI_SCSIPORTPAGE0_CAP_QAS (0x00000004) -#define MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIPORTPAGE0_CAP_WIDE (0x20000000) @@ -479,12 +629,13 @@ typedef struct _CONFIG_PAGE_SCSI_PORT_1 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Configuration; /* 04h */ + U32 OnBusTimerValue; /* 08h */ } fCONFIG_PAGE_SCSI_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_1, SCSIPortPage1_t, MPI_POINTER pSCSIPortPage1_t; -#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x01) +#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x02) #define MPI_SCSIPORTPAGE1_CFG_PORT_SCSI_ID_MASK (0x000000FF) #define MPI_SCSIPORTPAGE1_CFG_PORT_RESPONSE_ID_MASK (0xFFFF0000) @@ -500,7 +651,7 @@ typedef struct _CONFIG_PAGE_SCSI_PORT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 PortFlags; /* 04h */ U32 PortSettings; /* 08h */ 
MPI_DEVICE_INFO DeviceSettings[16]; /* 0Ch */ @@ -510,7 +661,6 @@ #define MPI_SCSIPORTPAGE2_PAGEVERSION (0x01) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_SCAN_HIGH_TO_LOW (0x00000001) -#define MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE (0x00000002) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET (0x00000004) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_ALTERNATE_CHS (0x00000008) #define MPI_SCSIPORTPAGE2_PORT_FLAGS_TERMINATION_DISABLE (0x00000010) @@ -536,47 +686,48 @@ #define MPI_SCSIPORTPAGE2_DEVICE_BOOT_CHOICE (0x0020) -/****************************************************************************/ -/* SCSI Target Device Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* SCSI Target Device Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_SCSI_DEVICE_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 NegotiatedParameters; /* 04h */ U32 Information; /* 08h */ } fCONFIG_PAGE_SCSI_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_0, SCSIDevicePage0_t, MPI_POINTER pSCSIDevicePage0_t; -#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x01) +#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x02) #define MPI_SCSIDEVPAGE0_NP_IU (0x00000001) #define MPI_SCSIDEVPAGE0_NP_DT (0x00000002) #define MPI_SCSIDEVPAGE0_NP_QAS (0x00000004) -#define MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIDEVPAGE0_NP_WIDE (0x20000000) #define MPI_SCSIDEVPAGE0_NP_AIP (0x80000000) #define MPI_SCSIDEVPAGE0_INFO_PARAMS_NEGOTIATED (0x00000001) +#define MPI_SCSIDEVPAGE0_INFO_SDTR_REJECTED (0x00000002) +#define MPI_SCSIDEVPAGE0_INFO_WDTR_REJECTED (0x00000004) +#define MPI_SCSIDEVPAGE0_INFO_PPR_REJECTED (0x00000008) typedef struct _CONFIG_PAGE_SCSI_DEVICE_1 { - 
fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 RequestedParameters; /* 04h */ U32 Reserved; /* 08h */ U32 Configuration; /* 0Ch */ } fCONFIG_PAGE_SCSI_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_1, SCSIDevicePage1_t, MPI_POINTER pSCSIDevicePage1_t; -#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x02) +#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x03) #define MPI_SCSIDEVPAGE1_RP_IU (0x00000001) #define MPI_SCSIDEVPAGE1_RP_DT (0x00000002) #define MPI_SCSIDEVPAGE1_RP_QAS (0x00000004) -#define MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS (0x00000008) #define MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK (0x0000FF00) #define MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK (0x00FF0000) #define MPI_SCSIDEVPAGE1_RP_WIDE (0x20000000) @@ -585,12 +736,13 @@ #define MPI_SCSIDEVPAGE1_DV_LVD_DRIVE_STRENGTH_MASK (0x00000003) #define MPI_SCSIDEVPAGE1_DV_SE_SLEW_RATE_MASK (0x00000300) -#define MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED (0x00000001) +#define MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED (0x00000002) +#define MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED (0x00000004) typedef struct _CONFIG_PAGE_SCSI_DEVICE_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 DomainValidation; /* 04h */ U32 ParityPipeSelect; /* 08h */ U32 DataPipeSelect; /* 0Ch */ @@ -629,13 +781,13 @@ #define MPI_SCSIDEVPAGE2_DPS_BIT_15_PL_SELECT_MASK (0xC0000000) -/****************************************************************************/ -/* FC Port Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* FC Port Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_FC_PORT_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U8 MPIPortNumber; /* 08h */ U8 LinkType; /* 09h */ @@ -715,7 +867,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_1 { - 
fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Flags; /* 04h */ U64 NoSEEPROMWWNN; /* 08h */ U64 NoSEEPROMWWPN; /* 10h */ @@ -726,8 +878,10 @@ } fCONFIG_PAGE_FC_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_1, FCPortPage1_t, MPI_POINTER pFCPortPage1_t; -#define MPI_FCPORTPAGE1_PAGEVERSION (0x01) +#define MPI_FCPORTPAGE1_PAGEVERSION (0x02) +#define MPI_FCPORTPAGE1_FLAGS_EXT_FCP_STATUS_EN (0x08000000) +#define MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY (0x04000000) #define MPI_FCPORTPAGE1_FLAGS_SORT_BY_DID (0x00000001) #define MPI_FCPORTPAGE1_FLAGS_SORT_BY_WWN (0x00000000) @@ -747,22 +901,21 @@ #define MPI_FCPORTPAGE1_LCONFIG_SPEED_10GIG (0x03) #define MPI_FCPORTPAGE1_LCONFIG_SPEED_AUTO (0x0F) -#define MPI_FCPORTPAGE1_TOPOLGY_MASK (0x0F) -#define MPI_FCPORTPAGE1_TOPOLGY_NLPORT (0x01) -#define MPI_FCPORTPAGE1_TOPOLGY_NPORT (0x02) -#define MPI_FCPORTPAGE1_TOPOLGY_AUTO (0x0F) +#define MPI_FCPORTPAGE1_TOPOLOGY_MASK (0x0F) +#define MPI_FCPORTPAGE1_TOPOLOGY_NLPORT (0x01) +#define MPI_FCPORTPAGE1_TOPOLOGY_NPORT (0x02) +#define MPI_FCPORTPAGE1_TOPOLOGY_AUTO (0x0F) typedef struct _CONFIG_PAGE_FC_PORT_2 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U8 NumberActive; /* 04h */ - U8 ALPA[126]; /* 05h */ - U8 Reserved; /* 83h */ + U8 ALPA[127]; /* 05h */ } fCONFIG_PAGE_FC_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_2, FCPortPage2_t, MPI_POINTER pFCPortPage2_t; -#define MPI_FCPORTPAGE2_PAGEVERSION (0x00) +#define MPI_FCPORTPAGE2_PAGEVERSION (0x01) typedef struct _WWN_FORMAT @@ -795,10 +948,18 @@ #define MPI_PERSISTENT_FLAGS_BOOT_DEVICE (0x0008) #define MPI_PERSISTENT_FLAGS_BY_DID (0x0080) +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_FC_PORT_PAGE_3_ENTRY_MAX +#define MPI_FC_PORT_PAGE_3_ENTRY_MAX (1) +#endif + typedef struct _CONFIG_PAGE_FC_PORT_3 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - FC_PORT_PERSISTENT Entry[1]; /* 04h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + FC_PORT_PERSISTENT Entry[MPI_FC_PORT_PAGE_3_ENTRY_MAX]; /* 04h */ } fCONFIG_PAGE_FC_PORT_3, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_3, FCPortPage3_t, MPI_POINTER pFCPortPage3_t; @@ -807,7 +968,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_4 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 PortFlags; /* 04h */ U32 PortSettings; /* 08h */ } fCONFIG_PAGE_FC_PORT_4, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_4, @@ -833,13 +994,22 @@ U16 Reserved; /* 02h */ U64 AliasWWNN; /* 04h */ U64 AliasWWPN; /* 0Ch */ -} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO, +} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO, + MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO, FcPortPage5AliasInfo_t, MPI_POINTER pFcPortPage5AliasInfo_t; +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. 
+ */ +#ifndef MPI_FC_PORT_PAGE_5_ALIAS_MAX +#define MPI_FC_PORT_PAGE_5_ALIAS_MAX (1) +#endif + typedef struct _CONFIG_PAGE_FC_PORT_5 { - fCONFIG_PAGE_HEADER Header; /* 00h */ - fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo[1]; /* 04h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo[MPI_FC_PORT_PAGE_5_ALIAS_MAX];/* 04h */ } fCONFIG_PAGE_FC_PORT_5, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5, FCPortPage5_t, MPI_POINTER pFCPortPage5_t; @@ -851,7 +1021,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U64 TimeSinceReset; /* 08h */ U64 TxFrames; /* 10h */ @@ -877,7 +1047,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_7 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U8 PortSymbolicName[256]; /* 08h */ } fCONFIG_PAGE_FC_PORT_7, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_7, @@ -888,7 +1058,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_8 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 BitVector[8]; /* 04h */ } fCONFIG_PAGE_FC_PORT_8, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_8, FCPortPage8_t, MPI_POINTER pFCPortPage8_t; @@ -898,7 +1068,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_9 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; /* 00h */ U32 Reserved; /* 04h */ U64 GlobalWWPN; /* 08h */ U64 GlobalWWNN; /* 10h */ @@ -916,13 +1086,13 @@ #define MPI_FCPORTPAGE9_PAGEVERSION (0x00) -/****************************************************************************/ -/* FC Device Config Pages */ -/****************************************************************************/ +/**************************************************************************** +* FC Device Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_FC_DEVICE_0 { - fCONFIG_PAGE_HEADER Header; /* 00h */ + fCONFIG_PAGE_HEADER Header; 
/* 00h */ U64 WWNN; /* 04h */ U64 WWPN; /* 0Ch */ U32 PortIdentifier; /* 14h */ @@ -947,112 +1117,191 @@ #define MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET (0x02) #define MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR (0x04) -#define MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID) -#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID) -#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK) -#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT) -#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK) - - -/****************************************************************************/ -/* RAID Volume Config Pages */ -/****************************************************************************/ - -typedef struct _RAIDVOL2_IM_PHYS_ID -{ - U8 TargetID; /* 00h */ - U8 Bus; /* 01h */ - U8 IocNumber; /* 02h */ - U8 PhysDiskNumber; /* 03h */ - U8 Reserved[8]; /* 04h */ - U8 PhysicalDiskIdentifier[16]; /* 0Ch */ - U8 VendorId[8]; /* 1Ch */ - U8 ProductId[16]; /* 24h */ - U8 ProductRevLevel[4]; /* 34h */ - U32 Reserved1; /* 38h */ - U8 Info[32]; /* 3Ch */ -} RAIDVOL2_IM_PHYS_ID, MPI_POINTER PTR_RAIDVOL2_IM_PHYS_ID, - RaidVol2ImPhysicalID_t, MPI_POINTER pRaidVol2ImPhysicalID_t; - -typedef struct _RAIDVOL2_IM_DISK_INFO -{ - U32 DiskStatus; /* 00h */ - U32 DeviceSettings; /* 04h */ - U16 ErrorCount; /* 08h */ - U16 Reserved; /* 0Ah */ - U8 ErrorCdbByte; /* 0Ch */ - U8 ErrorSenseKey; /* 0Dh */ - U8 ErrorASC; /* 0Eh */ - U8 ErrorASCQ; /* 0Fh */ - U16 SmartCount; /* 10h */ - U8 SmartASC; /* 12h */ - U8 SmartASCQ; /* 13h */ -} RAIDVOL2_IM_DISK_INFO, MPI_POINTER PTR_RAIDVOL2_IM_DISK_INFO, - RaidVol2ImDiskInfo_t, MPI_POINTER pRaidVol2ImDiskInfo_t; +#define 
MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID) +#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID) +#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK) +#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT) +#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK) + + +/**************************************************************************** +* RAID Volume Config Pages +****************************************************************************/ + +typedef struct _RAID_VOL0_PHYS_DISK +{ + U16 Reserved; /* 00h */ + U8 PhysDiskMap; /* 02h */ + U8 PhysDiskNum; /* 03h */ +} RAID_VOL0_PHYS_DISK, MPI_POINTER PTR_RAID_VOL0_PHYS_DISK, + RaidVol0PhysDisk_t, MPI_POINTER pRaidVol0PhysDisk_t; + +#define MPI_RAIDVOL0_PHYSDISK_PRIMARY (0x01) +#define MPI_RAIDVOL0_PHYSDISK_SECONDARY (0x02) + +typedef struct _RAID_VOL0_STATUS +{ + U8 Flags; /* 00h */ + U8 State; /* 01h */ + U16 Reserved; /* 02h */ +} RAID_VOL0_STATUS, MPI_POINTER PTR_RAID_VOL0_STATUS, + RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t; + +/* RAID Volume Page 0 VolumeStatus defines */ + +#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01) +#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02) +#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04) + +#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00) +#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01) +#define MPI_RAIDVOL0_STATUS_STATE_FAILED (0x02) + +typedef struct _RAID_VOL0_SETTINGS +{ + U16 Settings; /* 00h */ + U8 HotSparePool; /* 01h */ /* MPI_RAID_HOT_SPARE_POOL_ */ + U8 Reserved; /* 02h */ +} RAID_VOL0_SETTINGS, MPI_POINTER PTR_RAID_VOL0_SETTINGS, + RaidVol0Settings, MPI_POINTER pRaidVol0Settings; + +/* RAID Volume Page 
0 VolumeSettings defines */ + +#define MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE (0x0001) +#define MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART (0x0002) +#define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE (0x0004) +#define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC (0x0008) +#define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0010) +#define MPI_RAIDVOL0_SETTING_USE_DEFAULTS (0x8000) + +/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +#define MPI_RAID_HOT_SPARE_POOL_0 (0x01) +#define MPI_RAID_HOT_SPARE_POOL_1 (0x02) +#define MPI_RAID_HOT_SPARE_POOL_2 (0x04) +#define MPI_RAID_HOT_SPARE_POOL_3 (0x08) +#define MPI_RAID_HOT_SPARE_POOL_4 (0x10) +#define MPI_RAID_HOT_SPARE_POOL_5 (0x20) +#define MPI_RAID_HOT_SPARE_POOL_6 (0x40) +#define MPI_RAID_HOT_SPARE_POOL_7 (0x80) + +/* + * Host code (drivers, BIOS, utilities, etc.) should leave this define set to + * one and check Header.PageLength at runtime. + */ +#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _CONFIG_PAGE_RAID_VOL_0 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 VolumeIOC; /* 06h */ + U8 VolumeType; /* 07h */ /* MPI_RAID_VOL_TYPE_ */ + RAID_VOL0_STATUS VolumeStatus; /* 08h */ + RAID_VOL0_SETTINGS VolumeSettings; /* 0Ch */ + U32 MaxLBA; /* 10h */ + U32 Reserved1; /* 14h */ + U32 StripeSize; /* 18h */ + U32 Reserved2; /* 1Ch */ + U32 Reserved3; /* 20h */ + U8 NumPhysDisks; /* 24h */ + U8 Reserved4; /* 25h */ + U16 Reserved5; /* 26h */ + RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */ +} fCONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0, + RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t; + +#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x00) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +typedef struct 
_RAID_PHYS_DISK0_ERROR_DATA +{ + U8 ErrorCdbByte; /* 00h */ + U8 ErrorSenseKey; /* 01h */ + U16 Reserved; /* 02h */ + U16 ErrorCount; /* 04h */ + U8 ErrorASC; /* 06h */ + U8 ErrorASCQ; /* 07h */ + U16 SmartCount; /* 08h */ + U8 SmartASC; /* 0Ah */ + U8 SmartASCQ; /* 0Bh */ +} RAID_PHYS_DISK0_ERROR_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_ERROR_DATA, + RaidPhysDisk0ErrorData_t, MPI_POINTER pRaidPhysDisk0ErrorData_t; + +typedef struct _RAID_PHYS_DISK_INQUIRY_DATA +{ + U8 VendorID[8]; /* 00h */ + U8 ProductID[16]; /* 08h */ + U8 ProductRevLevel[4]; /* 18h */ + U8 Info[32]; /* 1Ch */ +} RAID_PHYS_DISK0_INQUIRY_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_INQUIRY_DATA, + RaidPhysDisk0InquiryData, MPI_POINTER pRaidPhysDisk0InquiryData; + +typedef struct _RAID_PHYS_DISK0_SETTINGS +{ + U8 SepID; /* 00h */ + U8 SepBus; /* 01h */ + U8 HotSparePool; /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */ + U8 PhysDiskSettings; /* 03h */ +} RAID_PHYS_DISK0_SETTINGS, MPI_POINTER PTR_RAID_PHYS_DISK0_SETTINGS, + RaidPhysDiskSettings_t, MPI_POINTER pRaidPhysDiskSettings_t; + +typedef struct _RAID_PHYS_DISK0_STATUS +{ + U8 Flags; /* 00h */ + U8 State; /* 01h */ + U16 Reserved; /* 02h */ +} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS, + RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t; /* RAID Volume 2 IM Physical Disk DiskStatus flags */ -#define MPI_RVP2_PHYS_DISK_PRIMARY (0x00000001) -#define MPI_RVP2_PHYS_DISK_SECONDARY (0x00000002) -#define MPI_RVP2_PHYS_DISK_HOT_SPARE (0x00000004) -#define MPI_RVP2_PHYS_DISK_OUT_OF_SYNC (0x00000008) -#define MPI_RVP2_PHYS_DISK_STATUS_MASK (0x00000F00) -#define MPI_RVP2_PHYS_DISK_STATUS_ONLINE (0x00000000) -#define MPI_RVP2_PHYS_DISK_STATUS_MISSING (0x00000100) -#define MPI_RVP2_PHYS_DISK_STATUS_NOT_COMPATIBLE (0x00000200) -#define MPI_RVP2_PHYS_DISK_STATUS_FAILED (0x00000300) -#define MPI_RVP2_PHYS_DISK_STATUS_INITIALIZING (0x00000400) -#define MPI_RVP2_PHYS_DISK_STATUS_OFFLINE_REQUESTED (0x00000500) -#define 
MPI_RVP2_PHYS_DISK_STATUS_OTHER_OFFLINE (0x00000F00) - - -typedef struct _RAIDVOL2_IM_PHYSICAL_DISK -{ - RAIDVOL2_IM_PHYS_ID Id; /* 00h */ - RAIDVOL2_IM_DISK_INFO Info; /* 5Ch */ -} RAIDVOL2_IM_PHYSICAL_DISK, MPI_POINTER PTR_RAIDVOL2_IM_PHYSICAL_DISK, - RaidVol2ImPhysicalDisk_t, MPI_POINTER pRaidVol2ImPhysicalDisk_t; - -#define MPI_RAIDVOLPAGE2_MAX_DISKS (3) - -typedef struct _CONFIG_PAGE_RAID_VOL_2 -{ - fCONFIG_PAGE_HEADER Header; /* 00h */ - U32 VolumeStatus; /* 04h */ - U32 VolumeSettings; /* 08h */ - U32 Reserved; /* 0Ch */ - U64 MaxLba; /* 10h */ - U32 BlockSize; /* 18h */ - U8 Reserved1; /* 1Ch */ - U8 NumPhysicalDisks; /* 1Dh */ - U16 Reserved2; /* 1Eh */ - RAIDVOL2_IM_PHYSICAL_DISK IMPhysicalDisk[MPI_RAIDVOLPAGE2_MAX_DISKS]; -} fCONFIG_PAGE_RAID_VOL_2, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_2, - RaidVolumePage2_t, MPI_POINTER pRaidVolumePage2_t; - -#define MPI_RAIDVOLPAGE2_PAGEVERSION (0x00) - -/* RAID Volume Page 2 VolumeStatus defines */ - -#define MPI_RAIDVOLPAGE2_STATUS_ENABLED (0x00000001) -#define MPI_RAIDVOLPAGE2_STATUS_QUIESCED (0x00000002) -#define MPI_RAIDVOLPAGE2_STATUS_RESYNC_IN_PROGRESS (0x00000004) -#define MPI_RAIDVOLPAGE2_STATUS_DEGRADED (0x00000008) - -/* RAID Volume Page 2 VolumeSettings defines */ - -#define MPI_RAIDVOLPAGE2_SETTING_WRITE_CACHING_ENABLE (0x00000001) -#define MPI_RAIDVOLPAGE2_SETTING_OFFLINE_ON_SMART (0x00000002) -#define MPI_RAIDVOLPAGE2_SETTING_AUTO_CONFIGURE (0x00000004) -#define MPI_RAIDVOLPAGE2_SETTING_USE_DEFAULTS (0x80000000) - - -/****************************************************************************/ -/* LAN Config Pages */ -/****************************************************************************/ +#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01) +#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02) + +#define MPI_PHYSDISK0_STATUS_ONLINE (0x00) +#define MPI_PHYSDISK0_STATUS_MISSING (0x01) +#define MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE (0x02) +#define MPI_PHYSDISK0_STATUS_FAILED (0x03) +#define 
MPI_PHYSDISK0_STATUS_INITIALIZING (0x04) +#define MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED (0x05) +#define MPI_PHYSDISK0_STATUS_FAILED_REQUESTED (0x06) +#define MPI_PHYSDISK0_STATUS_OTHER_OFFLINE (0xFF) + +typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0 +{ + fCONFIG_PAGE_HEADER Header; /* 00h */ + U8 PhysDiskID; /* 04h */ + U8 PhysDiskBus; /* 05h */ + U8 PhysDiskIOC; /* 06h */ + U8 PhysDiskNum; /* 07h */ + RAID_PHYS_DISK0_SETTINGS PhysDiskSettings; /* 08h */ + U32 Reserved1; /* 0Ch */ + U32 Reserved2; /* 10h */ + U32 Reserved3; /* 14h */ + U8 DiskIdentifier[16]; /* 18h */ + RAID_PHYS_DISK0_INQUIRY_DATA InquiryData; /* 28h */ + RAID_PHYS_DISK0_STATUS PhysDiskStatus; /* 64h */ + U32 MaxLBA; /* 68h */ + RAID_PHYS_DISK0_ERROR_DATA ErrorData; /* 6Ch */ +} fCONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0, + RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t; + +#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x00) + + +/**************************************************************************** +* LAN Config Pages +****************************************************************************/ typedef struct _CONFIG_PAGE_LAN_0 { @@ -1083,8 +1332,8 @@ U32 MaxWireSpeedHigh; /* 1Ch */ U32 BucketsRemaining; /* 20h */ U32 MaxReplySize; /* 24h */ - U32 NegWireSpeedHigh; /* 28h */ - U32 NegWireSpeedLow; /* 2Ch */ + U32 NegWireSpeedLow; /* 28h */ + U32 NegWireSpeedHigh; /* 2Ch */ } fCONFIG_PAGE_LAN_1, MPI_POINTER PTR_CONFIG_PAGE_LAN_1, LANPage1_t, MPI_POINTER pLANPage1_t; diff -Nru a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h --- a/drivers/message/fusion/lsi/mpi_fc.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi_fc.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI Fibre Channel messages and structures * Creation Date: June 12, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.02 * * Version History * --------------- @@ -32,6 +32,9 @@ * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define. 
* Added structure offset comments. * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Change name of reserved field in + * MSG_LINK_SERVICE_RSP_REPLY. * -------------------------------------------------------------------------- */ @@ -172,7 +175,7 @@ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U16 Reserved1; /* 04h */ - U8 Reserved2; /* 06h */ + U8 Reserved_0100_InitiatorIndex; /* 06h */ /* obsolete InitiatorIndex */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved3; /* 0Ch */ diff -Nru a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h --- a/drivers/message/fusion/lsi/mpi_init.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi_init.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI initiator mode messages and structures * Creation Date: June 8, 2000 * - * MPI Version: 01.01.05 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -22,6 +22,13 @@ * 02-20-01 01.01.03 Started using MPI_POINTER. * 03-27-01 01.01.04 Added structure offset comments. * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 08-29-01 01.02.02 Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET. + * Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for + * MSG_SCSI_IO_REPLY. + * 09-28-01 01.02.03 Added structures and defines for SCSI Enclosure + * Processor messages. + * 10-04-01 01.02.04 Added defines for SEP request Action field. 
* -------------------------------------------------------------------------- */ @@ -151,6 +158,7 @@ #define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04) #define MPI_SCSI_STATE_TERMINATED (0x08) #define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20) /* SCSIIO Reply ResponseInfo values */ /* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */ @@ -191,6 +199,7 @@ #define MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) #define MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) #define MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS (0x04) +#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) /* MsgFlags bits */ #define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00) @@ -215,5 +224,92 @@ U32 TerminationCount; /* 14h */ } MSG_SCSI_TASK_MGMT_REPLY, MPI_POINTER PTR_MSG_SCSI_TASK_MGMT_REPLY, SCSITaskMgmtReply_t, MPI_POINTER pSCSITaskMgmtReply_t; + + +/****************************************************************************/ +/* SCSI Enclosure Processor messages */ +/****************************************************************************/ + +typedef struct _MSG_SEP_REQUEST +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 Action; /* 04h */ + U8 Reserved1; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 SlotStatus; /* 0Ch */ +} MSG_SEP_REQUEST, MPI_POINTER PTR_MSG_SEP_REQUEST, + SEPRequest_t, MPI_POINTER pSEPRequest_t; + +/* Action defines */ +#define MPI_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI_SEP_REQ_ACTION_READ_STATUS (0x01) + +/* SlotStatus bits for MSG_SEP_REQUEST */ +#define MPI_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI_SEP_REQ_SLOTSTATUS_PARITY_CHECK (0x00000020) 
+#define MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000) +#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000) +#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) +#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000) +#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000) +#define MPI_SEP_REQ_SLOTSTATUS_SWAP_RESET (0x80000000) + + +typedef struct _MSG_SEP_REPLY +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 Action; /* 04h */ + U8 Reserved1; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved3; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 SlotStatus; /* 14h */ +} MSG_SEP_REPLY, MPI_POINTER PTR_MSG_SEP_REPLY, + SEPReply_t, MPI_POINTER pSEPReply_t; + +/* SlotStatus bits for MSG_SEP_REPLY */ +#define MPI_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI_SEP_REPLY_SLOTSTATUS_PARITY_CHECK (0x00000020) +#define MPI_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000) +#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) 
+#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000) +#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000) +#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000) +#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000) +#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000) +#define MPI_SEP_REPLY_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000) +#define MPI_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x10000000) +#define MPI_SEP_REPLY_SLOTSTATUS_FAULT_SENSED (0x40000000) +#define MPI_SEP_REPLY_SLOTSTATUS_SWAPPED (0x80000000) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h --- a/drivers/message/fusion/lsi/mpi_ioc.h Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/lsi/mpi_ioc.h Thu May 30 21:28:59 2002 @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: August 11, 2000 * - * MPI Version: 01.01.07 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -38,6 +38,19 @@ * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER. * Added structure offset comments. * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE. + * 08-08-01 01.02.01 Original release for v1.2 work. + * New format for FWVersion and ProductId in + * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. + * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and + * related structure and defines. + * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. + * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. + * Replaced a reserved field in MSG_IOC_FACTS_REPLY with + * IOCExceptions and changed DataImageSize to reserved. + * Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and + * MPI_FW_UPLOAD_ITYPE_NVDATA. + * 09-28-01 01.02.03 Modified Event Data for Integrated RAID. + * 11-01-01 01.02.04 Added defines for MPI_EXT_IMAGE_HEADER ImageType field. 
* -------------------------------------------------------------------------- */ @@ -73,6 +86,17 @@ } MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT, IOCInit_t, MPI_POINTER pIOCInit_t; +/* WhoInit values */ +#define MPI_WHOINIT_NO_ONE (0x00) +#define MPI_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI_WHOINIT_ROM_BIOS (0x02) +#define MPI_WHOINIT_PCI_PEER (0x03) +#define MPI_WHOINIT_HOST_DRIVER (0x04) +#define MPI_WHOINIT_MANUFACTURER (0x05) + +/* Flags values */ +#define MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE (0x01) + typedef struct _MSG_IOC_INIT_REPLY { U8 WhoInit; /* 00h */ @@ -90,14 +114,6 @@ } MSG_IOC_INIT_REPLY, MPI_POINTER PTR_MSG_IOC_INIT_REPLY, IOCInitReply_t, MPI_POINTER pIOCInitReply_t; -/* WhoInit values */ - -#define MPI_WHOINIT_NO_ONE (0x00) -#define MPI_WHOINIT_SYSTEM_BIOS (0x01) -#define MPI_WHOINIT_ROM_BIOS (0x02) -#define MPI_WHOINIT_PCI_PEER (0x03) -#define MPI_WHOINIT_HOST_DRIVER (0x04) -#define MPI_WHOINIT_MANUFACTURER (0x05) /****************************************************************************/ @@ -115,8 +131,21 @@ } MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS, IOCFacts_t, MPI_POINTER pIOCFacts_t; -/* IOC Facts Reply */ +typedef struct _MPI_FW_VERSION_STRUCT +{ + U8 Dev; /* 00h */ + U8 Unit; /* 01h */ + U8 Minor; /* 02h */ + U8 Major; /* 03h */ +} MPI_FW_VERSION_STRUCT; + +typedef union _MPI_FW_VERSION +{ + MPI_FW_VERSION_STRUCT Struct; + U32 Word; +} MPI_FW_VERSION; +/* IOC Facts Reply */ typedef struct _MSG_IOC_FACTS_REPLY { U16 MsgVersion; /* 00h */ @@ -126,7 +155,7 @@ U8 IOCNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved2; /* 0Ch */ + U16 IOCExceptions; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U8 MaxChainDepth; /* 14h */ @@ -135,7 +164,7 @@ U8 Flags; /* 17h */ U16 ReplyQueueDepth; /* 18h */ U16 RequestFrameSize; /* 1Ah */ - U16 FWVersion; /* 1Ch */ + U16 Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */ U16 ProductID; /* 1Eh */ U32 CurrentHostMfaHighAddr; /* 20h */ U16 
GlobalCredits; /* 24h */ @@ -146,18 +175,20 @@ U8 MaxDevices; /* 2Eh */ U8 MaxBuses; /* 2Fh */ U32 FWImageSize; /* 30h */ - U32 DataImageSize; /* 34h */ + U32 Reserved4; /* 34h */ + MPI_FW_VERSION FWVersion; /* 38h */ } MSG_IOC_FACTS_REPLY, MPI_POINTER PTR_MSG_IOC_FACTS_REPLY, IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t; -#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) -#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) + +#define MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) -#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01) -#define MPI_IOCFACTS_FLAGS_DATA_IMAGE_UPLOAD (0x02) +#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01) -#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00) -#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01) +#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00) +#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01) @@ -326,7 +357,6 @@ } MSG_EVENT_ACK_REPLY, MPI_POINTER PTR_MSG_EVENT_ACK_REPLY, EventAckReply_t, MPI_POINTER pEventAckReply_t; - /* Switch */ #define MPI_EVENT_NOTIFICATION_SWITCH_OFF (0x00) @@ -345,7 +375,9 @@ #define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008) #define MPI_EVENT_LOGOUT (0x00000009) #define MPI_EVENT_EVENT_CHANGE (0x0000000A) -#define MPI_EVENT_RAID_STATUS_CHANGE (0x0000000B) +#define MPI_EVENT_INTEGRATED_RAID (0x0000000B) +#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C) +#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D) /* AckRequired field values */ @@ -372,6 +404,27 @@ } EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI, EventDataScsi_t, MPI_POINTER pEventDataScsi_t; +/* SCSI Device Status Change Event data */ + +typedef struct _EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 ReasonCode; /* 02h */ + U8 LUN; /* 03h */ + U8 ASC; /* 04h */ + U8 ASCQ; /* 05h */ + U16 Reserved; /* 06h */ +} EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE, + MPI_POINTER 
PTR_EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE, + MpiEventDataScsiDeviceStatusChange_t, + MPI_POINTER pMpiEventDataScsiDeviceStatusChange_t; + +/* MPI SCSI Device Status Change Event data ReasonCode values */ +#define MPI_EVENT_SCSI_DEV_STAT_RC_ADDED (0x03) +#define MPI_EVENT_SCSI_DEV_STAT_RC_NOT_RESPONDING (0x04) +#define MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA (0x05) + /* MPI Link Status Change Event data */ typedef struct _EVENT_DATA_LINK_STATUS @@ -417,29 +470,34 @@ } EVENT_DATA_LOGOUT, MPI_POINTER PTR_EVENT_DATA_LOGOUT, EventDataLogout_t, MPI_POINTER pEventDataLogout_t; -/* MPI RAID Status Change Event data */ +/* MPI Integrated RAID Event data */ -typedef struct _EVENT_DATA_RAID_STATUS_CHANGE +typedef struct _EVENT_DATA_RAID { - U8 VolumeTargetID; /* 00h */ + U8 VolumeID; /* 00h */ U8 VolumeBus; /* 01h */ U8 ReasonCode; /* 02h */ U8 PhysDiskNum; /* 03h */ U8 ASC; /* 04h */ U8 ASCQ; /* 05h */ U16 Reserved; /* 06h */ -} EVENT_DATA_RAID_STATUS_CHANGE, MPI_POINTER PTR_EVENT_DATA_RAID_STATUS_CHANGE, - MpiEventDataRaidStatusChange_t, MPI_POINTER pMpiEventDataRaidStatusChange_t; - - -/* MPI RAID Status Change Event data ReasonCode values */ - -#define MPI_EVENT_RAID_DATA_RC_VOLUME_OPTIMAL (0x00) -#define MPI_EVENT_RAID_DATA_RC_VOLUME_DEGRADED (0x01) -#define MPI_EVENT_RAID_DATA_RC_STARTED_RESYNC (0x02) -#define MPI_EVENT_RAID_DATA_RC_DISK_ADDED (0x03) -#define MPI_EVENT_RAID_DATA_RC_DISK_NOT_RESPONDING (0x04) -#define MPI_EVENT_RAID_DATA_RC_SMART_DATA (0x05) + U32 SettingsStatus; /* 08h */ +} EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID, + MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t; + +/* MPI Integrated RAID Event data ReasonCode values */ +#define MPI_EVENT_RAID_RC_VOLUME_CREATED (0x00) +#define MPI_EVENT_RAID_RC_VOLUME_DELETED (0x01) +#define MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED (0x02) +#define MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED (0x03) +#define MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED (0x04) +#define MPI_EVENT_RAID_RC_PHYSDISK_CREATED (0x05) 
+#define MPI_EVENT_RAID_RC_PHYSDISK_DELETED (0x06) +#define MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED (0x07) +#define MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED (0x08) +#define MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED (0x09) +#define MPI_EVENT_RAID_RC_SMART_DATA (0x0A) +#define MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED (0x0B) /***************************************************************************** @@ -468,6 +526,7 @@ #define MPI_FW_DOWNLOAD_ITYPE_RESERVED (0x00) #define MPI_FW_DOWNLOAD_ITYPE_FW (0x01) #define MPI_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI_FW_DOWNLOAD_ITYPE_NVDATA (0x03) typedef struct _FWDownloadTCSGE @@ -476,7 +535,7 @@ U8 ContextSize; /* 01h */ U8 DetailsLength; /* 02h */ U8 Flags; /* 03h */ - U32 Reserved1; /* 04h */ + U32 Reserved_0100_Checksum; /* 04h */ /* obsolete Checksum */ U32 ImageOffset; /* 08h */ U32 ImageSize; /* 0Ch */ } FW_DOWNLOAD_TCSGE, MPI_POINTER PTR_FW_DOWNLOAD_TCSGE, @@ -519,7 +578,7 @@ #define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00) #define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01) #define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) -#define MPI_FW_UPLOAD_ITYPE_DATA_IOC_MEM (0x03) +#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03) typedef struct _FWUploadTCSGE { @@ -563,11 +622,10 @@ U32 Checksum; /* 1Ch */ U16 VendorId; /* 20h */ U16 ProductId; /* 22h */ - U16 FwVersion; /* 24h */ - U16 Reserved1; /* 26h */ + MPI_FW_VERSION FWVersion; /* 24h */ U32 SeqCodeVersion; /* 28h */ U32 ImageSize; /* 2Ch */ - U32 Reserved2; /* 30h */ + U32 NextImageHeaderOffset; /* 30h */ U32 LoadStartAddress; /* 34h */ U32 IopResetVectorValue; /* 38h */ U32 IopResetRegAddr; /* 3Ch */ @@ -581,30 +639,49 @@ #define MPI_FW_HEADER_WHAT_SIGNATURE (0x29232840) /* defines for using the ProductId field */ -#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000) -#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000) -#define MPI_FW_HEADER_PID_TYPE_FC (0x1000) - -#define MPI_FW_HEADER_PID_FW_VENDOR_MASK (0x0F00) -#define MPI_FW_HEADER_PID_FW_VENDOR_LSI (0x0000) - -#define 
MPI_FW_HEADER_PID_FAMILY_MASK (0x000F) -#define MPI_FW_HEADER_PID_FAMILY_1030_SCSI (0x0000) -#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000) -#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001) -#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002) - - -typedef struct _MPI_DATA_HEADER -{ - U32 Signature; /* 00h */ - U16 FunctionNumber; /* 04h */ - U16 Length; /* 06h */ - U32 Checksum; /* 08h */ - U32 LoadStartAddress; /* 0Ch */ -} MPI_DATA_HEADER, MPI_POINTER PTR_MPI_DATA_HEADER, - MpiDataHeader_t, MPI_POINTER pMpiDataHeader_t; +#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000) +#define MPI_FW_HEADER_PID_TYPE_FC (0x1000) + +#define MPI_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI (0x0100) +#define MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI_FW_HEADER_PID_PROD_TARGET_SCSI (0x0300) +#define MPI_FW_HEADER_PID_PROD_IM_SCSI (0x0400) +#define MPI_FW_HEADER_PID_PROD_IS_SCSI (0x0500) +#define MPI_FW_HEADER_PID_PROD_CTX_SCSI (0x0600) + +#define MPI_FW_HEADER_PID_FAMILY_MASK (0x00FF) +#define MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI (0x0001) +#define MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI (0x0002) +#define MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI (0x0003) +#define MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI (0x0004) +#define MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI (0x0005) +#define MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI (0x0006) +#define MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI (0x0007) +#define MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI (0x0008) +#define MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI (0x0009) +#define MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI (0x000A) +#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000) +#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001) +#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002) -#define MPI_DATA_HEADER_SIGNATURE (0x43504147) +typedef struct _MPI_EXT_IMAGE_HEADER +{ + U8 ImageType; /* 00h */ + U8 Reserved; /* 01h */ + U16 Reserved1; /* 02h */ + U32 Checksum; /* 04h */ + U32 ImageSize; /* 08h 
*/ + U32 NextImageHeaderOffset; /* 0Ch */ + U32 LoadStartAddress; /* 10h */ + U32 Reserved2; /* 14h */ +} MPI_EXT_IMAGE_HEADER, MPI_POINTER PTR_MPI_EXT_IMAGE_HEADER, + MpiExtImageHeader_t, MPI_POINTER pMpiExtImageHeader_t; + +/* defines for the ImageType field */ +#define MPI_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI_EXT_IMAGE_TYPE_FW (0x01) +#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h --- a/drivers/message/fusion/lsi/mpi_lan.h Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/lsi/mpi_lan.h Thu May 30 21:28:59 2002 @@ -6,7 +6,7 @@ * Title: MPI LAN messages and structures * Creation Date: June 30, 2000 * - * MPI Version: 01.01.03 + * MPI Version: 01.02.01 * * Version History * --------------- @@ -27,6 +27,7 @@ * 11-02-00 01.01.01 Original release for post 1.0 work * 02-20-01 01.01.02 Started using MPI_POINTER. * 03-27-01 01.01.03 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. * -------------------------------------------------------------------------- */ diff -Nru a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/message/fusion/lsi/mpi_raid.h Thu May 30 21:28:59 2002 @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2001 LSI Logic Corporation. + * + * + * Name: MPI_RAID.H + * Title: MPI RAID message and structures + * Creation Date: February 27, 2001 + * + * MPI Version: 01.02.04 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 02-27-01 01.01.01 Original release for this file. + * 03-27-01 01.01.02 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Major rework for MPI v1.2 Integrated RAID changes. + * 10-04-01 01.02.03 Added ActionData defines for + * MPI_RAID_ACTION_DELETE_VOLUME action. 
+ * 11-01-01 01.02.04 Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI_RAID_H +#define MPI_RAID_H + + +/****************************************************************************** +* +* R A I D M e s s a g e s +* +*******************************************************************************/ + + +/****************************************************************************/ +/* RAID Volume Request */ +/****************************************************************************/ + +typedef struct _MSG_RAID_ACTION +{ + U8 Action; /* 00h */ + U8 Reserved1; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 PhysDiskNum; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved2; /* 0Ch */ + U32 ActionDataWord; /* 10h */ + SGE_SIMPLE_UNION ActionDataSGE; /* 14h */ +} MSG_RAID_ACTION_REQUEST, MPI_POINTER PTR_MSG_RAID_ACTION_REQUEST, + MpiRaidActionRequest_t , MPI_POINTER pMpiRaidActionRequest_t; + + +/* RAID Action request Action values */ + +#define MPI_RAID_ACTION_STATUS (0x00) +#define MPI_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI_RAID_ACTION_DISABLE_VOLUME (0x04) +#define MPI_RAID_ACTION_ENABLE_VOLUME (0x05) +#define MPI_RAID_ACTION_QUIESCE_PHYS_IO (0x06) +#define MPI_RAID_ACTION_ENABLE_PHYS_IO (0x07) +#define MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS (0x08) +#define MPI_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI_RAID_ACTION_CHANGE_PHYSDISK_SETTINGS (0x0C) +#define MPI_RAID_ACTION_CREATE_PHYSDISK (0x0D) +#define MPI_RAID_ACTION_DELETE_PHYSDISK (0x0E) +#define MPI_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI_RAID_ACTION_REPLACE_PHYSDISK (0x10) + +/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */ 
+#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001) + +/* ActionDataWord defines for use with MPI_RAID_ACTION_DELETE_VOLUME action */ +#define MPI_RAID_ACTION_ADATA_KEEP_PHYS_DISKS (0x00000000) +#define MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS (0x00000001) + + +/* RAID Action reply message */ + +typedef struct _MSG_RAID_ACTION_REPLY +{ + U8 Action; /* 00h */ + U8 Reserved; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 VolumeID; /* 04h */ + U8 VolumeBus; /* 05h */ + U8 PhysDiskNum; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 ActionStatus; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 VolumeStatus; /* 14h */ + U32 ActionData; /* 18h */ +} MSG_RAID_ACTION_REPLY, MPI_POINTER PTR_MSG_RAID_ACTION_REPLY, + MpiRaidActionReply_t, MPI_POINTER pMpiRaidActionReply_t; + + +/* RAID Volume reply ActionStatus values */ + +#define MPI_RAID_ACTION_ASTATUS_SUCCESS (0x0000) +#define MPI_RAID_ACTION_ASTATUS_INVALID_ACTION (0x0001) +#define MPI_RAID_ACTION_ASTATUS_FAILURE (0x0002) +#define MPI_RAID_ACTION_ASTATUS_IN_PROGRESS (0x0003) + + +/* RAID Volume reply RAID Volume Indicator structure */ + +typedef struct _MPI_RAID_VOL_INDICATOR +{ + U64 TotalBlocks; /* 00h */ + U64 BlocksRemaining; /* 08h */ +} MPI_RAID_VOL_INDICATOR, MPI_POINTER PTR_MPI_RAID_VOL_INDICATOR, + MpiRaidVolIndicator_t, MPI_POINTER pMpiRaidVolIndicator_t; + + +/****************************************************************************/ +/* SCSI IO RAID Passthrough Request */ +/****************************************************************************/ + +typedef struct _MSG_SCSI_IO_RAID_PT_REQUEST +{ + U8 PhysDiskNum; /* 00h */ + U8 Reserved1; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U8 CDBLength; /* 04h */ + U8 SenseBufferLength; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 LUN[8]; /* 0Ch */ + U32 Control; /* 14h */ + U8 CDB[16]; /* 18h */ + U32 DataLength; /* 28h */ + U32 
SenseBufferLowAddr; /* 2Ch */ + SGE_IO_UNION SGL; /* 30h */ +} MSG_SCSI_IO_RAID_PT_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REQUEST, + SCSIIORaidPassthroughRequest_t, MPI_POINTER pSCSIIORaidPassthroughRequest_t; + + +/* SCSI IO RAID Passthrough reply structure */ + +typedef struct _MSG_SCSI_IO_RAID_PT_REPLY +{ + U8 PhysDiskNum; /* 00h */ + U8 Reserved1; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U8 CDBLength; /* 04h */ + U8 SenseBufferLength; /* 05h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 SCSIStatus; /* 0Ch */ + U8 SCSIState; /* 0Dh */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 TransferCount; /* 14h */ + U32 SenseCount; /* 18h */ + U32 ResponseInfo; /* 1Ch */ +} MSG_SCSI_IO_RAID_PT_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REPLY, + SCSIIORaidPassthroughReply_t, MPI_POINTER pSCSIIORaidPassthroughReply_t; + + +#endif + + + diff -Nru a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h --- a/drivers/message/fusion/lsi/mpi_targ.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi_targ.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI Target mode messages and structures * Creation Date: June 22, 2000 * - * MPI Version: 01.01.04 + * MPI Version: 01.02.04 * * Version History * --------------- @@ -26,6 +26,14 @@ * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and * MPI_TARGET_FCP_CMD_BUFFER. * 03-27-01 01.01.04 Added structure offset comments. + * 08-08-01 01.02.01 Original release for v1.2 work. + * 09-28-01 01.02.02 Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU. + * Added PriorityReason field to some replies and + * defined more PriorityReason codes. + * Added some defines for to support previous version + * of MPI. + * 10-04-01 01.02.03 Added PriorityReason to MSG_TARGET_ERROR_REPLY. + * 11-01-01 01.02.04 Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY. 
* -------------------------------------------------------------------------- */ @@ -78,6 +86,7 @@ #define CMD_BUFFER_POST_FLAGS_64_BIT_ADDR (0x80) #define CMD_BUFFER_POST_IO_INDEX_MASK (0x00003FFF) +#define CMD_BUFFER_POST_IO_INDEX_MASK_0100 (0x000003FF) /* obsolete */ typedef struct _MSG_TARGET_CMD_BUFFER_POST_REPLY @@ -97,7 +106,7 @@ } MSG_TARGET_CMD_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REPLY, TargetCmdBufferPostReply_t, MPI_POINTER pTargetCmdBufferPostReply_t; - +/* the following structure is obsolete as of MPI v1.2 */ typedef struct _MSG_PRIORITY_CMD_RECEIVED_REPLY { U16 Reserved; /* 00h */ @@ -117,6 +126,13 @@ #define PRIORITY_REASON_NO_DISCONNECT (0x00) #define PRIORITY_REASON_SCSI_TASK_MANAGEMENT (0x01) +#define PRIORITY_REASON_CMD_PARITY_ERR (0x02) +#define PRIORITY_REASON_MSG_OUT_PARITY_ERR (0x03) +#define PRIORITY_REASON_LQ_CRC_ERR (0x04) +#define PRIORITY_REASON_CMD_CRC_ERR (0x05) +#define PRIORITY_REASON_PROTOCOL_ERR (0x06) +#define PRIORITY_REASON_DATA_OUT_PARITY_ERR (0x07) +#define PRIORITY_REASON_DATA_OUT_CRC_ERR (0x08) #define PRIORITY_REASON_UNKNOWN (0xFF) @@ -129,7 +145,8 @@ U8 Reserved2; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved3; /* 0Ch */ + U8 PriorityReason; /* 0Ch */ + U8 Reserved3; /* 0Dh */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 ReplyWord; /* 14h */ @@ -204,7 +221,8 @@ U8 Reserved2; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ - U16 Reserved3; /* 0Ch */ + U8 PriorityReason; /* 0Ch */ + U8 Reserved3; /* 0Dh */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 ReplyWord; /* 14h */ @@ -234,8 +252,34 @@ TargetStatusSendRequest_t, MPI_POINTER pTargetStatusSendRequest_t; #define TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS (0x01) +#define TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY (0x04) #define TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER (0x80) +typedef struct _MPI_TARGET_FCP_RSP_BUFFER +{ + U8 Reserved0[8]; /* 00h */ + U8 FcpStatus; /* 08h */ + U8 FcpFlags; /* 
09h */ + U8 Reserved1[2]; /* 0Ah */ + U32 FcpResid; /* 0Ch */ + U32 FcpSenseLength; /* 10h */ + U32 FcpResponseLength; /* 14h */ + U8 FcpResponseData[8]; /* 18h */ + U8 FcpSenseData[32]; /* Pad to 64 bytes */ /* 20h */ +} MPI_TARGET_FCP_RSP_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_RSP_BUFFER, + MpiTargetFcpRspBuffer, MPI_POINTER pMpiTargetFcpRspBuffer; + +typedef struct _MPI_TARGET_SCSI_SPI_STATUS_IU +{ + U8 Reserved0; /* 00h */ + U8 Reserved1; /* 01h */ + U8 Valid; /* 02h */ + U8 Status; /* 03h */ + U32 SenseDataListLength; /* 04h */ + U32 PktFailuresListLength; /* 08h */ + U8 SenseData[52]; /* Pad the IU to 64 bytes */ /* 0Ch */ +} MPI_TARGET_SCSI_SPI_STATUS_IU, MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_STATUS_IU, + TargetScsiSpiStatusIU_t, MPI_POINTER pTargetScsiSpiStatusIU_t; /****************************************************************************/ /* Target Mode Abort Request */ @@ -323,6 +367,41 @@ #define SET_PORT(t, p) ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) | \ (((p) << TARGET_MODE_REPLY_PORT_SHIFT) & \ TARGET_MODE_REPLY_PORT_MASK)) + +/* the following obsolete values are for MPI v1.0 support */ +#define TARGET_MODE_REPLY_0100_MASK_HOST_INDEX (0x000003FF) +#define TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX (0) +#define TARGET_MODE_REPLY_0100_MASK_IOC_INDEX (0x001FF800) +#define TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX (11) +#define TARGET_MODE_REPLY_0100_PORT_MASK (0x00400000) +#define TARGET_MODE_REPLY_0100_PORT_SHIFT (22) +#define TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX (0x1F800000) +#define TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX (23) + +#define GET_HOST_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) \ + >> TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) + +#define SET_HOST_INDEX_0100(t, hi) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) | \ + (((hi) << TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_HOST_INDEX)) + +#define GET_IOC_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) \ + >> 
TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) + +#define SET_IOC_INDEX_0100(t, ii) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) | \ + (((ii) << TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_IOC_INDEX)) + +#define GET_INITIATOR_INDEX_0100(x) \ + (((x) & TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) \ + >> TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) + +#define SET_INITIATOR_INDEX_0100(t, ii) \ + ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) | \ + (((ii) << TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) & \ + TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX)) #endif diff -Nru a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h --- a/drivers/message/fusion/lsi/mpi_type.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/lsi/mpi_type.h Thu May 30 21:28:58 2002 @@ -6,7 +6,7 @@ * Title: MPI Basic type definitions * Creation Date: June 6, 2000 * - * MPI Version: 01.01.02 + * MPI Version: 01.02.01 * * Version History * --------------- @@ -17,6 +17,7 @@ * 06-06-00 01.00.01 Update version number for 1.0 release. * 11-02-00 01.01.01 Original release for post 1.0 work * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER. + * 08-08-01 01.02.01 Original release for v1.2 work. * -------------------------------------------------------------------------- */ diff -Nru a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c --- a/drivers/message/fusion/mptbase.c Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/mptbase.c Thu May 30 21:28:59 2002 @@ -20,6 +20,12 @@ * And to Roger Hickerson (LSI Logic) for tirelessly supporting * this driver project. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. 
+ * * All manner of help from Stephen Shirron (LSI Logic): * low-level FC analysis, debug + various fixes in FCxx firmware, * initial port to alpha platform, various driver code optimizations, @@ -38,11 +44,12 @@ * for gobs of hard work fixing and optimizing LAN code. * THANK YOU! * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptbase.c,v 1.53.4.3 2001/09/18 03:54:54 sralston Exp $ + * $Id: mptbase.c,v 1.110 2002/02/27 18:44:20 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -93,11 +100,14 @@ #include #include #include -#include +#include /* needed for in_interrupt() proto */ #include #ifdef CONFIG_MTRR #include #endif +#ifdef __sparc__ +#include /* needed for __irq_itoa() proto */ +#endif #include "mptbase.h" @@ -110,27 +120,33 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); - /* * cmd line parameters */ MODULE_PARM(PortIo, "0-1i"); MODULE_PARM_DESC(PortIo, "[0]=Use mmap, 1=Use port io"); -MODULE_PARM(HardReset, "0-1i"); -MODULE_PARM_DESC(HardReset, "0=Disable HardReset, [1]=Enable HardReset"); static int PortIo = 0; -static int HardReset = 1; + +#ifdef MFCNT +static int mfcounter = 0; +#define PRINT_MF_COUNT 20000 +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Public data... */ -int mpt_lan_index = 0; -int mpt_stm_index = 0; +int mpt_lan_index = -1; +int mpt_stm_index = -1; + +struct proc_dir_entry *mpt_proc_root_dir; + +DmpServices_t *DmpService; + +void *mpt_v_ASCQ_TablePtr; +const char **mpt_ScsiOpcodesPtr; +int mpt_ASCQ_TableSz; -void *mpt_v_ASCQ_TablePtr = NULL; -const char **mpt_ScsiOpcodesPtr = NULL; -int mpt_ASCQ_TableSz = 0; #define WHOINIT_UNKNOWN 0xAA @@ -139,12 +155,12 @@ * Private data... 
*/ /* Adapter lookup table */ -static MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS] = {0}; + MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS]; static MPT_ADAPTER_TRACKER MptAdapters; /* Callback lookup table */ static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS]; /* Protocol driver class lookup table */ -static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS]; +static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS]; /* Event handler lookup table */ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; /* Reset handler lookup table */ @@ -152,6 +168,10 @@ static int FusionInitCalled = 0; static int mpt_base_index = -1; +static int last_drv_idx = -1; +static int isense_idx = -1; + +static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -160,49 +180,84 @@ static void mpt_interrupt(int irq, void *bus_id, struct pt_regs *r); static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); -static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason); -static int mpt_adapter_install(struct pci_dev *pdev); -static void mpt_detect_929_bound_ports(MPT_ADAPTER *this, struct pci_dev *pdev); +static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag); +static int mpt_adapter_install(struct pci_dev *pdev); +static void mpt_detect_bound_ports(MPT_ADAPTER *this, struct pci_dev *pdev); static void mpt_adapter_disable(MPT_ADAPTER *ioc, int freeup); static void mpt_adapter_dispose(MPT_ADAPTER *ioc); static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc); -static int MakeIocReady(MPT_ADAPTER *ioc, int force); -static u32 GetIocState(MPT_ADAPTER *ioc, int cooked); -static int GetIocFacts(MPT_ADAPTER *ioc); -static int GetPortFacts(MPT_ADAPTER *ioc, int portnum); -static int SendIocInit(MPT_ADAPTER *ioc); -static int SendPortEnable(MPT_ADAPTER *ioc, int portnum); -static int mpt_fc9x9_reset(MPT_ADAPTER *ioc, int ignore); -static int KickStart(MPT_ADAPTER *ioc, int ignore); 
-static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type); +static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag); +//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); +static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason); +static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag); +static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag); +static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag); +static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag); +static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag); +static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag); +static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag); +static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag); static int PrimeIocFifos(MPT_ADAPTER *ioc); -static int HandShakeReqAndReply(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait); -static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong); -static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong); -static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong); +static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag); +static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag); +static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag); static int GetLanConfigPages(MPT_ADAPTER *ioc); +static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum); +static int GetIoUnitPage2(MPT_ADAPTER *ioc); +static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); +static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); +static int mpt_findImVolumes(MPT_ADAPTER *ioc); +static void mpt_timer_expired(unsigned long data); static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); -static int procmpt_create(void); #ifdef CONFIG_PROC_FS +static int 
procmpt_create(void); static int procmpt_destroy(void); +static int procmpt_summary_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +static int procmpt_version_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +static int procmpt_iocinfo_read(char *buf, char **start, off_t offset, + int request, int *eof, void *data); #endif -static int procmpt_read_summary(char *page, char **start, off_t off, int count, int *eof, void *data); -static int procmpt_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data); -/*static int procmpt_info(char *buf, char **start, off_t offset, int len);*/ +static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); +//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info); -static struct proc_dir_entry *procmpt_root_dir = NULL; - int fusion_init(void); static void fusion_exit(void); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * more Private data... + */ +#ifdef CONFIG_PROC_FS +struct _mpt_proc_list { + const char *name; + int (*f)(char *, char **, off_t, int, int *, void *); +} mpt_proc_list[] = { + { "summary", procmpt_summary_read}, + { "version", procmpt_version_read}, +}; +#define MPT_PROC_ENTRIES (sizeof(mpt_proc_list)/sizeof(mpt_proc_list[0])) + +struct _mpt_ioc_proc_list { + const char *name; + int (*f)(char *, char **, off_t, int, int *, void *); +} mpt_ioc_proc_list[] = { + { "info", procmpt_iocinfo_read}, + { "summary", procmpt_summary_read}, +}; +#define MPT_IOC_PROC_ENTRIES (sizeof(mpt_ioc_proc_list)/sizeof(mpt_ioc_proc_list[0])) + +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* 20000207 -sralston * GRRRRR... 
IOSpace (port i/o) register access (for the 909) is back! * 20000517 -sralston @@ -225,9 +280,18 @@ writel(v, a); } +static inline void CHIPREG_PIO_WRITE32(volatile u32 *a, u32 v) +{ + outl(v, (unsigned long)a); +} + +static inline u32 CHIPREG_PIO_READ32(volatile u32 *a) +{ + return inl((unsigned long)a); +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. * @irq: irq number (not used) * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure @@ -252,8 +316,7 @@ MPT_FRAME_HDR *mf; MPT_FRAME_HDR *mr; u32 pa; - u32 *m; - int req_idx; + int req_idx = -1; int cb_idx; int type; int freeme; @@ -262,6 +325,21 @@ ioc = bus_id; /* + * Verify ioc pointer is ok + */ + { + MPT_ADAPTER *iocCmp; + iocCmp = mpt_adapter_find_first(); + while ((ioc != iocCmp) && iocCmp) + iocCmp = mpt_adapter_find_next(iocCmp); + + if (!iocCmp) { + printk(KERN_WARNING "mpt_interrupt: Invalid ioc!\n"); + return; + } + } + + /* * Drain the reply FIFO! * * NOTES: I've seen up to 10 replies processed in this loop, so far... @@ -281,25 +359,27 @@ * Check for non-TURBO reply! */ if (pa & MPI_ADDRESS_REPLY_A_BIT) { - dma_addr_t reply_dma_addr; + u32 reply_dma_low; u16 ioc_stat; /* non-TURBO reply! Hmmm, something may be up... * Newest turbo reply mechanism; get address * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)! */ - reply_dma_addr = (pa = (pa << 1)); - /* Map DMA address of reply header to cpu address. */ - m = (u32 *) ((u8 *)ioc->reply_frames + - (reply_dma_addr - ioc->reply_frames_dma)); + /* Map DMA address of reply header to cpu address. 
+ * pa is 32 bits - but the dma address may be 32 or 64 bits + * get offset based only only the low addresses + */ + reply_dma_low = (pa = (pa << 1)); + mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames + + (reply_dma_low - ioc->reply_frames_low_dma)); - mr = (MPT_FRAME_HDR *) m; req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx); cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; mf = MPT_INDEX_2_MFPTR(ioc, req_idx); - dprintk((KERN_INFO MYNAM ": %s: Got non-TURBO reply=%p\n", + dprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p\n", ioc->name, mr)); DBG_DUMP_REPLY_FRAME(mr) @@ -307,7 +387,7 @@ * Check/log IOC log info */ ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus); - if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo); if ((int)ioc->chip_type <= (int)FC929) mpt_fc_log_info(ioc, log_info); @@ -318,7 +398,7 @@ /* * Process turbo (context) reply... */ - dirqprintk((KERN_INFO MYNAM ": %s: Got TURBO reply(=%08x)\n", ioc->name, pa)); + dirqprintk((MYIOC_s_INFO_FMT "Got TURBO reply(=%08x)\n", ioc->name, pa)); type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT); if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) { cb_idx = mpt_stm_index; @@ -357,6 +437,34 @@ pa = 0; /* No reply flush! */ } + if ((int)ioc->chip_type > (int)FC929) { + /* Verify mf, mf are reasonable. 
+ */ + if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth)) + || (mf < ioc->req_frames)) ) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, mf, req_idx); + cb_idx = 0; + pa = 0; + freeme = 0; + } + if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth)) + || (mr < ioc->reply_frames)) ) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, mr); + cb_idx = 0; + pa = 0; + freeme = 0; + } + if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) { + printk(MYIOC_s_WARN_FMT + "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx); + cb_idx = 0; + pa = 0; + freeme = 0; + } + } + /* Check for (valid) IO callback! */ if (cb_idx) { /* Do the callback! */ @@ -374,15 +482,18 @@ /* Put Request back on FreeQ! */ spin_lock_irqsave(&ioc->FreeQlock, flags); Q_ADD_TAIL(&ioc->FreeQ, &mf->u.frame.linkage, MPT_FRAME_HDR); +#ifdef MFCNT + ioc->mfcnt--; +#endif spin_unlock_irqrestore(&ioc->FreeQlock, flags); } count++; - dirqprintk((KERN_INFO MYNAM ": %s: ISR processed frame #%d\n", ioc->name, count)); + dirqprintk((MYIOC_s_INFO_FMT "ISR processed frame #%d\n", ioc->name, count)); mb(); if (count >= MPT_MAX_REPLIES_PER_ISR) { - dirqprintk((KERN_INFO MYNAM ": %s: ISR processed %d replies.", + dirqprintk((MYIOC_s_INFO_FMT "ISR processed %d replies.", ioc->name, count)); dirqprintk((" Giving this ISR a break!\n")); return; @@ -409,17 +520,17 @@ int freereq = 1; u8 func; - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply() called\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name)); if ((mf == NULL) || (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": %s: ERROR - NULL or BAD request frame ptr! (=%p)\n", + printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n", ioc->name, mf); return 1; } if (reply == NULL) { - dprintk((KERN_ERR MYNAM ": %s: ERROR - Unexpected NULL Event (turbo?) reply!\n", + dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) 
reply!\n", ioc->name)); return 1; } @@ -430,7 +541,7 @@ } func = reply->u.hdr.Function; - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply, Function=%02Xh\n", + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n", ioc->name, func)); if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { @@ -441,30 +552,77 @@ results = ProcessEventNotification(ioc, pEvReply, &evHandlers); if (results != evHandlers) { /* CHECKME! Any special handling needed here? */ - dprintk((KERN_WARNING MYNAM ": %s: Hmmm... Called %d event handlers, sum results = %d\n", + dprintk((MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n", ioc->name, evHandlers, results)); } /* - * Hmmm... It seems that EventNotificationReply is an exception - * to the rule of one reply per request. + * Hmmm... It seems that EventNotificationReply is an exception + * to the rule of one reply per request. */ if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) freereq = 0; + #ifdef CONFIG_PROC_FS // LogEvent(ioc, pEvReply); #endif + } else if (func == MPI_FUNCTION_EVENT_ACK) { - dprintk((KERN_INFO MYNAM ": %s: mpt_base_reply, EventAck reply received\n", + dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n", ioc->name)); + } else if (func == MPI_FUNCTION_CONFIG) { + CONFIGPARMS *pCfg; + unsigned long flags; + + dprintk((MYIOC_s_INFO_FMT "config_complete (mf=%p,mr=%p)\n", + ioc->name, mf, reply)); + + pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *))); + + if (pCfg) { + /* disable timer and remove from linked list */ + del_timer(&pCfg->timer); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_DEL_ITEM(&pCfg->linkage); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + /* + * If IOC Status is SUCCESS, save the header + * and set the status code to GOOD. 
+ */ + pCfg->status = MPT_CONFIG_ERROR; + if (reply) { + ConfigReply_t *pReply = (ConfigReply_t *)reply; + u16 status; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + dprintk((KERN_NOTICE " IOCStatus=%04xh, IOCLogInfo=%08xh\n", + status, le32_to_cpu(pReply->IOCLogInfo))); + + pCfg->status = status; + if (status == MPI_IOCSTATUS_SUCCESS) { + pCfg->hdr->PageVersion = pReply->Header.PageVersion; + pCfg->hdr->PageLength = pReply->Header.PageLength; + pCfg->hdr->PageNumber = pReply->Header.PageNumber; + pCfg->hdr->PageType = pReply->Header.PageType; + } + } + + /* + * Wake up the original calling thread + */ + pCfg->wait_done = 1; + wake_up(&mpt_waitq); + } } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Unexpected msg function (=%02Xh) reply received!\n", + printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", ioc->name, func); } /* - * Conditionally tell caller to free the original - * EventNotification/EventAck/unexpected request frame! + * Conditionally tell caller to free the original + * EventNotification/EventAck/unexpected request frame! */ return freereq; } @@ -480,21 +638,22 @@ * protocol-specific driver must do this before it will be able to * use any IOC resources, such as obtaining request frames. * - * NOTES: The SCSI protocol driver currently calls this routine twice - * in order to register separate callbacks; one for "normal" SCSI IO - * and another for MptScsiTaskMgmt requests. + * NOTES: The SCSI protocol driver currently calls this routine thrice + * in order to register separate callbacks; one for "normal" SCSI IO; + * one for MptScsiTaskMgmt requests; one for Scan/DV requests. * * Returns a positive integer valued "handle" in the - * range (and S.O.D. order) {7,6,...,1} if successful. + * range (and S.O.D. order) {N,...,7,6,5,...,1} if successful. * Any non-positive return value (including zero!) should be considered * an error by the caller. 
*/ int mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass) { - int r = -1; int i; + last_drv_idx = -1; + #ifndef MODULE /* * Handle possibility of the mptscsih_detect() routine getting @@ -512,7 +671,7 @@ #endif /* - * Search for empty callback slot in this order: {7,6,...,1} + * Search for empty callback slot in this order: {N,...,7,6,5,...,1} * (slot/handle 0 is reserved!) */ for (i = MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { @@ -520,7 +679,7 @@ MptCallbacks[i] = cbfunc; MptDriverClass[i] = dclass; MptEvHandlers[i] = NULL; - r = i; + last_drv_idx = i; if (cbfunc != mpt_base_reply) { MOD_INC_USE_COUNT; } @@ -528,7 +687,7 @@ } } - return r; + return last_drv_idx; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -546,6 +705,11 @@ MptCallbacks[cb_idx] = NULL; MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER; MptEvHandlers[cb_idx] = NULL; + + last_drv_idx++; + if (isense_idx != -1 && isense_idx <= cb_idx) + isense_idx++; + if (cb_idx != mpt_base_index) { MOD_DEC_USE_COUNT; } @@ -639,7 +803,8 @@ * @handle: Handle of registered MPT protocol driver * @iocid: IOC unique identifier (integer) * - * Returns pointer to a MPT request frame or %NULL if none are available. + * Returns pointer to a MPT request frame or %NULL if none are available + * or IOC is not active. */ MPT_FRAME_HDR* mpt_get_msg_frame(int handle, int iocid) @@ -650,6 +815,16 @@ /* validate handle and ioc identifier */ iocp = mpt_adapters[iocid]; + +#ifdef MFCNT + if (!iocp->active) + printk(KERN_WARNING "IOC Not Active! mpt_get_msg_frame returning NULL!\n"); +#endif + + /* If interrupts are not attached, do not return a request frame */ + if (!iocp->active) + return NULL; + spin_lock_irqsave(&iocp->FreeQlock, flags); if (! 
Q_IS_EMPTY(&iocp->FreeQ)) { int req_offset; @@ -662,8 +837,20 @@ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_offset / iocp->req_sz); mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0; +#ifdef MFCNT + iocp->mfcnt++; +#endif } spin_unlock_irqrestore(&iocp->FreeQlock, flags); + +#ifdef MFCNT + if (mf == NULL) + printk(KERN_WARNING "IOC Active. No free Msg Frames! Count 0x%x Max 0x%x\n", iocp->mfcnt, iocp->req_depth); + mfcounter++; + if (mfcounter == PRINT_MF_COUNT) + printk(KERN_INFO "MF Count 0x%x Max 0x%x \n", iocp->mfcnt, iocp->req_depth); +#endif + dmfprintk((KERN_INFO MYNAM ": %s: mpt_get_msg_frame(%d,%d), got mf=%p\n", iocp->name, handle, iocid, mf)); return mf; @@ -687,7 +874,7 @@ iocp = mpt_adapters[iocid]; if (iocp != NULL) { - dma_addr_t mf_dma_addr; + u32 mf_dma_addr; int req_offset; /* ensure values are reset properly! */ @@ -700,23 +887,23 @@ #ifdef MPT_DEBUG_MSG_FRAME { u32 *m = mf->u.frame.hwhdr.__hdr; - int i, n; + int ii, n; printk(KERN_INFO MYNAM ": %s: About to Put msg frame @ %p:\n" KERN_INFO " ", iocp->name, m); n = iocp->req_sz/4 - 1; while (m[n] == 0) n--; - for (i=0; i<=n; i++) { - if (i && ((i%8)==0)) + for (ii=0; ii<=n; ii++) { + if (ii && ((ii%8)==0)) printk("\n" KERN_INFO " "); - printk(" %08x", le32_to_cpu(m[i])); + printk(" %08x", le32_to_cpu(m[ii])); } printk("\n"); } #endif - mf_dma_addr = iocp->req_frames_dma + req_offset; + mf_dma_addr = iocp->req_frames_low_dma + req_offset; CHIPREG_WRITE32(&iocp->chip->RequestFifo, mf_dma_addr); } } @@ -742,6 +929,9 @@ /* Put Request back on FreeQ! */ spin_lock_irqsave(&iocp->FreeQlock, flags); Q_ADD_TAIL(&iocp->FreeQ, &mf->u.frame.linkage, MPT_FRAME_HDR); +#ifdef MFCNT + iocp->mfcnt--; +#endif spin_unlock_irqrestore(&iocp->FreeQlock, flags); } } @@ -754,8 +944,9 @@ * @iocid: IOC unique identifier (integer) * @reqBytes: Size of the request in bytes * @req: Pointer to MPT request frame + * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. 
* - * This routine is used exclusively by mptscsih to send MptScsiTaskMgmt + * This routine is used exclusively to send MptScsiTaskMgmt * requests since they are required to be sent via doorbell handshake. * * NOTE: It is the callers responsibility to byte-swap fields in the @@ -764,41 +955,30 @@ * Returns 0 for success, non-zero for failure. */ int -mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req) +mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req, int sleepFlag) { MPT_ADAPTER *iocp; int r = 0; iocp = mpt_adapters[iocid]; if (iocp != NULL) { - u8 *req_as_bytes; - u32 ioc_raw_state; - int i; - - /* YIKES! We already know something is amiss. - * Do upfront check on IOC state. - */ - ioc_raw_state = GetIocState(iocp, 0); - if ((ioc_raw_state & MPI_DOORBELL_ACTIVE) || - ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL)) { - printk(KERN_WARNING MYNAM ": %s: Bad IOC state (%08x) WARNING!\n", - iocp->name, ioc_raw_state); - if ((r = mpt_do_ioc_recovery(iocp, MPT_HOSTEVENT_IOC_RECOVER)) != 0) { - printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n", - r, iocp->name); - return r; - } - } + u8 *req_as_bytes; + int ii; + + /* State is known to be good upon entering + * this function so issue the bus reset + * request. + */ /* * Emulate what mpt_put_msg_frame() does /wrt to sanity * setting cb_idx/req_idx. But ONLY if this request * is in proper (pre-alloc'd) request buffer range... 
*/ - i = MFPTR_2_MPT_INDEX(iocp,(MPT_FRAME_HDR*)req); - if (reqBytes >= 12 && i >= 0 && i < iocp->req_depth) { + ii = MFPTR_2_MPT_INDEX(iocp,(MPT_FRAME_HDR*)req); + if (reqBytes >= 12 && ii >= 0 && ii < iocp->req_depth) { MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req; - mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(i); + mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii); mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; } @@ -810,36 +990,40 @@ ((reqBytes/4)<chip->Doorbell) & MPI_DOORBELL_ACTIVE)) + return -5; + dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n", - iocp->name, i)); + iocp->name, ii)); CHIPREG_WRITE32(&iocp->chip->IntStatus, 0); - if ((r = WaitForDoorbellAck(iocp, 1)) < 0) { + if ((r = WaitForDoorbellAck(iocp, 1, sleepFlag)) < 0) { return -2; } /* Send request via doorbell handshake */ req_as_bytes = (u8 *) req; - for (i = 0; i < reqBytes/4; i++) { + for (ii = 0; ii < reqBytes/4; ii++) { u32 word; - word = ((req_as_bytes[(i*4) + 0] << 0) | - (req_as_bytes[(i*4) + 1] << 8) | - (req_as_bytes[(i*4) + 2] << 16) | - (req_as_bytes[(i*4) + 3] << 24)); + word = ((req_as_bytes[(ii*4) + 0] << 0) | + (req_as_bytes[(ii*4) + 1] << 8) | + (req_as_bytes[(ii*4) + 2] << 16) | + (req_as_bytes[(ii*4) + 3] << 24)); CHIPREG_WRITE32(&iocp->chip->Doorbell, word); - if ((r = WaitForDoorbellAck(iocp, 1)) < 0) { + if ((r = WaitForDoorbellAck(iocp, 1, sleepFlag)) < 0) { r = -3; break; } } - if ((r = WaitForDoorbellInt(iocp, 2)) >= 0) + if ((r = WaitForDoorbellInt(iocp, 10, sleepFlag)) >= 0) r = 0; else r = -4; @@ -871,8 +1055,8 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_adapter_find_next - Find next MPT adapter pointer. - * @prev: Pointer to previous MPT adapter + * mpt_adapter_find_next - Find next MPT adapter pointer. + * @prev: Pointer to previous MPT adapter * * Returns next MPT adapter pointer or %NULL if there are no more. 
*/ @@ -888,13 +1072,13 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_pci_scan - Scan PCI devices for MPT adapters. * * Returns count of MPT adapters found, keying off of PCI vendor and * device_id's. */ -int __init +static int __init mpt_pci_scan(void) { struct pci_dev *pdev; @@ -906,7 +1090,7 @@ dprintk((KERN_INFO MYNAM ": Checking for MPT adapters...\n")); /* - * NOTE: The 929 (I believe) will appear as 2 separate PCI devices, + * NOTE: The 929 and 1030 will appear as 2 separate PCI devices, * one for each channel. */ pci_for_each_dev(pdev) { @@ -917,9 +1101,9 @@ if ((pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC909) && (pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC929) && (pdev->device != MPI_MANUFACTPAGE_DEVICEID_FC919) && + (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030) && #if 0 /* FIXME! C103x family */ - (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030) && (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1030_ZC) && (pdev->device != MPI_MANUFACTPAGE_DEVID_53C1035) && #endif @@ -929,7 +1113,7 @@ } /* GRRRRR - * 929 dual function devices may be presented in Func 1,0 order, + * dual function devices (929, 1030) may be presented in Func 1,0 order, * but we'd really really rather have them in Func 0,1 order. * Do some kind of look ahead here... 
*/ @@ -937,11 +1121,11 @@ pdev2 = pci_peek_next_dev(pdev); if (pdev2 && (pdev2->vendor == 0x1000) && (PCI_SLOT(pdev2->devfn) == PCI_SLOT(pdev->devfn)) && - (pdev2->device == MPI_MANUFACTPAGE_DEVICEID_FC929) && + (pdev2->device == pdev->device) && (pdev2->bus->number == pdev->bus->number) && !(pdev2->devfn & 1)) { dprintk((KERN_INFO MYNAM ": MPT adapter found: PCI bus/dfn=%02x/%02xh, class=%08x, id=%xh\n", - pdev2->bus->number, pdev2->devfn, pdev2->class, pdev2->device)); + pdev2->bus->number, pdev2->devfn, pdev2->class, pdev2->device)); found++; if ((r = mpt_adapter_install(pdev2)) == 0) count++; @@ -969,9 +1153,7 @@ } #ifdef CONFIG_PROC_FS - if (procmpt_create() != 0) - printk(KERN_WARNING MYNAM ": WARNING! - %s creation failed!\n", - MPT_PROCFS_MPTBASEDIR); + (void) procmpt_create(); #endif return count; @@ -1004,7 +1186,7 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_adapter_install - Install a PCI intelligent MPT adapter. * @pdev: Pointer to pci_dev structure * @@ -1030,7 +1212,7 @@ unsigned long port; u32 msize; u32 psize; - int i; + int ii; int r = -ENODEV; int len; @@ -1040,41 +1222,68 @@ return -ENOMEM; } memset(ioc, 0, sizeof(*ioc)); - ioc->req_sz = MPT_REQ_SIZE; /* avoid div by zero! */ ioc->alloc_total = sizeof(MPT_ADAPTER); + ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ + ioc->reply_sz = ioc->req_sz; ioc->pcidev = pdev; + ioc->diagPending = 0; + spin_lock_init(&ioc->diagLock); + + /* Initialize the event logging. + */ + ioc->eventTypes = 0; /* None */ + ioc->eventContext = 0; + ioc->eventLogSize = 0; + ioc->events = NULL; + +#ifdef MFCNT + ioc->mfcnt = 0; +#endif + + /* Initialize the FW and Data image pointers. + */ + ioc->FWImage = NULL; + ioc->FWImage_dma = 0; + + /* Initilize SCSI Config Data structure + */ + memset(&ioc->spi_data, 0, sizeof(ScsiCfgData)); + + /* Initialize the running configQ head. + */ + Q_INIT(&ioc->configQ, Q_ITEM); /* Find lookup slot. 
*/ - for (i=0; i < MPT_MAX_ADAPTERS; i++) { - if (mpt_adapters[i] == NULL) { - ioc->id = i; /* Assign adapter unique id (lookup) */ + for (ii=0; ii < MPT_MAX_ADAPTERS; ii++) { + if (mpt_adapters[ii] == NULL) { + ioc->id = ii; /* Assign adapter unique id (lookup) */ break; } } - if (i == MPT_MAX_ADAPTERS) { - printk(KERN_ERR MYNAM ": ERROR - mpt_adapters[%d] table overflow!\n", i); + if (ii == MPT_MAX_ADAPTERS) { + printk(KERN_ERR MYNAM ": ERROR - mpt_adapters[%d] table overflow!\n", ii); kfree(ioc); return -ENFILE; } mem_phys = msize = 0; port = psize = 0; - for (i=0; i < DEVICE_COUNT_RESOURCE; i++) { - if (pdev->PCI_BASEADDR_FLAGS(i) & PCI_BASE_ADDRESS_SPACE_IO) { + for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) { + if (pdev->PCI_BASEADDR_FLAGS(ii) & PCI_BASE_ADDRESS_SPACE_IO) { /* Get I/O space! */ - port = pdev->PCI_BASEADDR_START(i); - psize = PCI_BASEADDR_SIZE(pdev,i); + port = pdev->PCI_BASEADDR_START(ii); + psize = PCI_BASEADDR_SIZE(pdev,ii); } else { /* Get memmap */ - mem_phys = pdev->PCI_BASEADDR_START(i); - msize = PCI_BASEADDR_SIZE(pdev,i); + mem_phys = pdev->PCI_BASEADDR_START(ii); + msize = PCI_BASEADDR_SIZE(pdev,ii); break; } } ioc->mem_size = msize; - if (i == DEVICE_COUNT_RESOURCE) { + if (ii == DEVICE_COUNT_RESOURCE) { printk(KERN_ERR MYNAM ": ERROR - MPT adapter has no memory regions defined!\n"); kfree(ioc); return -EINVAL; @@ -1098,6 +1307,8 @@ } dprintk((KERN_INFO MYNAM ": mem = %p, mem_phys = %lx\n", mem, mem_phys)); + dprintk((KERN_INFO MYNAM ": facts @ %p, pfacts[0] @ %p\n", + &ioc->facts, &ioc->pfacts[0])); if (PortIo) { u8 *pmem = (u8*)port; ioc->mem_phys = port; @@ -1107,6 +1318,13 @@ ioc->chip = (SYSIF_REGS*)mem; } + /* Save Port IO values incase we need to do downloadboot */ + { + u8 *pmem = (u8*)port; + ioc->pio_mem_phys = port; + ioc->pio_chip = (SYSIF_REGS*)pmem; + } + ioc->chip_type = FCUNK; if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) { ioc->chip_type = FC909; @@ -1120,12 +1338,19 @@ ioc->chip_type = FC919; ioc->prod_name = 
"LSIFC919"; } -#if 0 - else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_53C1030) { + else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) { ioc->chip_type = C1030; ioc->prod_name = "LSI53C1030"; + { + /* 1030 Chip Fix. Disable Split transactions + * for PCIX. Set bits 4 - 6 to zero. + */ + u16 pcixcmd = 0; + pci_read_config_word(pdev, 0x6a, &pcixcmd); + pcixcmd &= 0xFF8F; + pci_write_config_word(pdev, 0x6a, pcixcmd); + } } -#endif myname = "iocN"; len = strlen(myname); @@ -1145,8 +1370,13 @@ r = request_irq(pdev->irq, mpt_interrupt, SA_SHIRQ, ioc->name, ioc); if (r < 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - Unable to allocate interrupt %d!\n", +#ifndef __sparc__ + printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %d!\n", ioc->name, pdev->irq); +#else + printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %s!\n", + ioc->name, __irq_itoa(pdev->irq)); +#endif iounmap(mem); kfree(ioc); return -EBUSY; @@ -1156,7 +1386,11 @@ pci_set_master(pdev); /* ?? */ +#ifndef __sparc__ dprintk((KERN_INFO MYNAM ": %s installed at interrupt %d\n", ioc->name, pdev->irq)); +#else + dprintk((KERN_INFO MYNAM ": %s installed at interrupt %s\n", ioc->name, __irq_itoa(pdev->irq))); +#endif } /* tack onto tail of our MPT adapter list */ @@ -1166,12 +1400,12 @@ mpt_adapters[ioc->id] = ioc; /* NEW! 20010220 -sralston - * Check for "929 bound ports" to reduce redundant resets. + * Check for "bound ports" (929, 1030) to reduce redundant resets. */ - if (ioc->chip_type == FC929) - mpt_detect_929_bound_ports(ioc, pdev); + if ((ioc->chip_type == FC929) || (ioc->chip_type == C1030)) + mpt_detect_bound_ports(ioc, pdev); - if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP)) != 0) { + if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) { printk(KERN_WARNING MYNAM ": WARNING - %s did not initialize properly! 
(%d)\n", ioc->name, r); } @@ -1180,10 +1414,11 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * mpt_do_ioc_recovery - Initialize or recover MPT adapter. * @ioc: Pointer to MPT adapter structure * @reason: Event word / reason + * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. * * This routine performs all the steps necessary to bring the IOC * to a OPERATIONAL state. @@ -1191,16 +1426,21 @@ * This routine also pre-fetches the LAN MAC address of a Fibre Channel * MPT adapter. * - * Returns 0 for success. + * Returns: + * 0 for success + * -1 if failed to get board READY + * -2 if READY but IOCFacts Failed + * -3 if READY but PrimeIOCFifos Failed + * -4 if READY but IOCInit Failed */ static int -mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason) +mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) { int hard_reset_done = 0; int alt_ioc_ready = 0; int hard; int r; - int i; + int ii; int handlers; printk(KERN_INFO MYNAM ": Initiating %s %s\n", @@ -1211,156 +1451,106 @@ ioc->active = 0; /* NOTE: Access to IOC's request FreeQ is now blocked! */ -// FIXME? Cleanup all IOC requests here! (or below?) -// But watch out for event associated request? + if (ioc->alt_ioc) { + /* Disable alt-IOC's reply interrupts for a bit ... */ + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); + ioc->alt_ioc->active = 0; + /* NOTE: Access to alt-IOC's request FreeQ is now blocked! */ + } - hard = HardReset; - if (ioc->alt_ioc && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) + hard = 1; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) hard = 0; - if ((hard_reset_done = MakeIocReady(ioc, hard)) < 0) { + if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) { printk(KERN_WARNING MYNAM ": %s NOT READY WARNING!\n", ioc->name); return -1; } -// NEW! -#if 0 // Kiss-of-death!?! - if (ioc->alt_ioc) { -// Grrr... Hold off any alt-IOC interrupts (and events) while -// handshaking to IOC, needed because? 
- /* Disable alt-IOC's reply interrupts for a bit ... */ - alt_ioc_intmask = CHIPREG_READ32(&ioc->alt_ioc->chip->IntMask); - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); - ioc->alt_ioc->active = 0; - /* NOTE: Access to alt-IOC's request FreeQ is now blocked! */ - } -#endif - + /* hard_reset_done = 0 if a soft reset was performed + * and 1 if a hard reset was performed. + */ if (hard_reset_done && ioc->alt_ioc) { - if ((r = MakeIocReady(ioc->alt_ioc, 0)) == 0) + if ((r = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) alt_ioc_ready = 1; else - printk(KERN_WARNING MYNAM ": alt-%s: (%d) Not ready WARNING!\n", + printk(KERN_WARNING MYNAM + ": alt-%s: (%d) Not ready WARNING!\n", ioc->alt_ioc->name, r); } + /* Get IOC facts! */ + if ((r = GetIocFacts(ioc, sleepFlag, reason)) != 0) + return -2; if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { - /* Get IOC facts! */ - if ((r = GetIocFacts(ioc)) != 0) - return -2; MptDisplayIocCapabilities(ioc); } - /* - * Call each currently registered protocol IOC reset handler - * with pre-reset indication. - * NOTE: If we're doing _IOC_BRINGUP, there can be no - * MptResetHandlers[] registered yet. - */ - if (hard_reset_done) { - r = handlers = 0; - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptResetHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Calling IOC pre_reset handler #%d\n", - ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc, MPT_IOC_PRE_RESET); - handlers++; - - if (alt_ioc_ready) { - dprintk((KERN_INFO MYNAM ": %s: Calling alt-IOC pre_reset handler #%d\n", - ioc->alt_ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); - handlers++; - } - } - } - /* FIXME? Examine results here? */ - } - - // May need to check/upload firmware & data here! - - if ((r = SendIocInit(ioc)) != 0) - return -3; -// NEW! 
if (alt_ioc_ready) { - if ((r = SendIocInit(ioc->alt_ioc)) != 0) { - alt_ioc_ready = 0; - printk(KERN_WARNING MYNAM ": alt-%s: (%d) init failure WARNING!\n", - ioc->alt_ioc->name, r); - } - } - - /* - * Call each currently registered protocol IOC reset handler - * with post-reset indication. - * NOTE: If we're doing _IOC_BRINGUP, there can be no - * MptResetHandlers[] registered yet. - */ - if (hard_reset_done) { - r = handlers = 0; - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptResetHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Calling IOC post_reset handler #%d\n", - ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc, MPT_IOC_POST_RESET); - handlers++; - - if (alt_ioc_ready) { - dprintk((KERN_INFO MYNAM ": %s: Calling alt-IOC post_reset handler #%d\n", - ioc->alt_ioc->name, i)); - r += (*(MptResetHandlers[i]))(ioc->alt_ioc, MPT_IOC_POST_RESET); - handlers++; - } - } + if ((r = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) + return -2; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + MptDisplayIocCapabilities(ioc->alt_ioc); } - /* FIXME? Examine results here? */ } /* * Prime reply & request queues! - * (mucho alloc's) + * (mucho alloc's) Must be done prior to + * init as upper addresses are needed for init. */ if ((r = PrimeIocFifos(ioc)) != 0) + return -3; + + // May need to check/upload firmware & data here! + if ((r = SendIocInit(ioc, sleepFlag)) != 0) return -4; // NEW! if (alt_ioc_ready && ((r = PrimeIocFifos(ioc->alt_ioc)) != 0)) { printk(KERN_WARNING MYNAM ": alt-%s: (%d) FIFO mgmt alloc WARNING!\n", ioc->alt_ioc->name, r); + alt_ioc_ready = 0; } -// FIXME! Cleanup all IOC (and alt-IOC?) requests here! 
+ if (alt_ioc_ready) { + if ((r = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { + alt_ioc_ready = 0; + printk(KERN_WARNING MYNAM + ": alt-%s: (%d) init failure WARNING!\n", + ioc->alt_ioc->name, r); + } + } - if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && - (ioc->lan_cnfg_page0.Header.PageLength == 0)) { - /* - * Pre-fetch the ports LAN MAC address! - * (LANPage1_t stuff) - */ - (void) GetLanConfigPages(ioc); -#ifdef MPT_DEBUG - { - u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; - dprintk((KERN_INFO MYNAM ": %s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", - ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] )); + if (reason == MPT_HOSTEVENT_IOC_BRINGUP){ + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + dprintk((MYIOC_s_INFO_FMT + "firmware upload required!\n", ioc->name)); + + r = mpt_do_upload(ioc, sleepFlag); + if (r != 0) + printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); + /* Handle the alt IOC too */ + if (alt_ioc_ready){ + r = mpt_do_upload(ioc->alt_ioc, sleepFlag); + if (r != 0) + printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); + } } -#endif } + /* Enable! (reply interrupt) */ CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM)); ioc->active = 1; -// NEW! -#if 0 // Kiss-of-death!?! - if (alt_ioc_ready && (r==0)) { + if (ioc->alt_ioc) { /* (re)Enable alt-IOC! (reply interrupt) */ dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n", ioc->alt_ioc->name)); CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM)); ioc->alt_ioc->active = 1; } -#endif /* NEW! 20010120 -sralston * Enable MPT base driver management of EventNotification @@ -1368,19 +1558,95 @@ */ if (!ioc->facts.EventState) (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */ -// NEW! -// FIXME!?! 
-// if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) { -// (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ -// } + + if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) + (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ + + /* (Bugzilla:fibrebugs, #513) + * Bug fix (part 2)! 20010905 -sralston + * Add additional "reason" check before call to GetLanConfigPages + * (combined with GetIoUnitPage2 call). This prevents a somewhat + * recursive scenario; GetLanConfigPages times out, timer expired + * routine calls HardResetHandler, which calls into here again, + * and we try GetLanConfigPages again... + */ + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + if ((int)ioc->chip_type <= (int)FC929) { + /* + * Pre-fetch FC port WWN and stuff... + * (FCPortPage0_t stuff) + */ + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + (void) GetFcPortPage0(ioc, ii); + } + + if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && + (ioc->lan_cnfg_page0.Header.PageLength == 0)) { + /* + * Pre-fetch the ports LAN MAC address! + * (LANPage1_t stuff) + */ + (void) GetLanConfigPages(ioc); +#ifdef MPT_DEBUG + { + u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; + dprintk((MYIOC_s_INFO_FMT "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] )); + } +#endif + } + } else { + /* Get NVRAM and adapter maximums from SPP 0 and 2 + */ + mpt_GetScsiPortSettings(ioc, 0); + + /* Get version and length of SDP 1 + */ + mpt_readScsiDevicePageHeaders(ioc, 0); + + /* Find IM volumes + */ + if (ioc->facts.MsgVersion >= 0x0102) + mpt_findImVolumes(ioc); + } + + GetIoUnitPage2(ioc); + } + + /* + * Call each currently registered protocol IOC reset handler + * with post-reset indication. + * NOTE: If we're doing _IOC_BRINGUP, there can be no + * MptResetHandlers[] registered yet. 
+ */ + if (hard_reset_done) { + r = handlers = 0; + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptResetHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n", + ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET); + handlers++; + + if (alt_ioc_ready) { + dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", + ioc->name, ioc->alt_ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); + handlers++; + } + } + } + /* FIXME? Examine results here? */ + } return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * mpt_detect_929_bound_ports - Search for PCI bus/dev_function - * which matches PCI bus/dev_function (+/-1) for newly discovered 929. + * mpt_detect_bound_ports - Search for PCI bus/dev_function + * which matches PCI bus/dev_function (+/-1) for newly discovered 929 + * or 1030. * @ioc: Pointer to MPT adapter structure * @pdev: Pointer to (struct pci_dev) structure * @@ -1388,22 +1654,22 @@ * using alt_ioc pointer fields in their %MPT_ADAPTER structures. 
*/ static void -mpt_detect_929_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) +mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) { MPT_ADAPTER *ioc_srch = mpt_adapter_find_first(); unsigned int match_lo, match_hi; match_lo = pdev->devfn-1; match_hi = pdev->devfn+1; - dprintk((KERN_INFO MYNAM ": %s: PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n", + dprintk((MYIOC_s_INFO_FMT "PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n", ioc->name, pdev->bus->number, pdev->devfn, match_lo, match_hi)); while (ioc_srch != NULL) { struct pci_dev *_pcidev = ioc_srch->pcidev; - if ( (_pcidev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) && - (_pcidev->bus->number == pdev->bus->number) && - (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) { + if ((_pcidev->device == pdev->device) && + (_pcidev->bus->number == pdev->bus->number) && + (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) { /* Paranoia checks */ if (ioc->alt_ioc != NULL) { printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n", @@ -1418,8 +1684,6 @@ ioc->name, ioc_srch->name)); ioc_srch->alt_ioc = ioc; ioc->alt_ioc = ioc_srch; - ioc->sod_reset = ioc->alt_ioc->sod_reset; - ioc->last_kickstart = ioc->alt_ioc->last_kickstart; break; } ioc_srch = mpt_adapter_find_next(ioc_srch); @@ -1440,10 +1704,10 @@ u32 state; /* Disable the FW */ - state = GetIocState(this, 1); + state = mpt_GetIocState(this, 1); if (state == MPI_IOC_STATE_OPERATIONAL) { - if (SendIocReset(this, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET) != 0) - (void) KickStart(this, 1); + if (SendIocReset(this, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, NO_SLEEP) != 0) + (void) KickStart(this, 1, NO_SLEEP); } /* Disable adapter interrupts! 
*/ @@ -1475,12 +1739,37 @@ } if (freeup && this->sense_buf_pool != NULL) { - sz = (this->req_depth * 256); + sz = (this->req_depth * MPT_SENSE_BUFFER_ALLOC); pci_free_consistent(this->pcidev, sz, this->sense_buf_pool, this->sense_buf_pool_dma); this->sense_buf_pool = NULL; this->alloc_total -= sz; } + + if (freeup && this->events != NULL){ + sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); + kfree(this->events); + this->events = NULL; + this->alloc_total -= sz; + } + + if (freeup && this->FWImage != NULL) { + sz = this->facts.FWImageSize; + pci_free_consistent(this->pcidev, sz, + this->FWImage, this->FWImage_dma); + this->FWImage = NULL; + this->alloc_total -= sz; + } + + if (freeup && this->spi_data.nvram != NULL) { + kfree(this->spi_data.nvram); + this->spi_data.nvram = NULL; + } + + if (freeup && this->spi_data.pIocPg3 != NULL) { + kfree(this->spi_data.pIocPg3); + this->spi_data.pIocPg3 = NULL; + } } } @@ -1575,23 +1864,30 @@ /* * MakeIocReady - Get IOC to a READY state, using KickStart if needed. * @ioc: Pointer to MPT_ADAPTER structure - * @kick: Force hard KickStart of IOC + * @force: Force hard KickStart of IOC + * @sleepFlag: Specifies whether the process can sleep * - * Returns 0 for already-READY, 1 for hard reset success, - * else negative for failure. 
+ * Returns: + * 1 - DIAG reset and READY + * 0 - READY initially OR soft reset and READY + * -1 - Any failure on KickStart + * -2 - Msg Unit Reset Failed + * -3 - IO Unit Reset Failed + * -4 - IOC owned by a PEER */ static int -MakeIocReady(MPT_ADAPTER *ioc, int force) +MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) { u32 ioc_state; int statefault = 0; - int cntdn; + int cntdn; int hard_reset_done = 0; int r; - int i; + int ii; + int whoinit; /* Get current [raw] IOC state */ - ioc_state = GetIocState(ioc, 0); + ioc_state = mpt_GetIocState(ioc, 0); dhsprintk((KERN_INFO MYNAM "::MakeIocReady, %s [raw] state=%08x\n", ioc->name, ioc_state)); /* @@ -1600,7 +1896,7 @@ */ if (ioc_state & MPI_DOORBELL_ACTIVE) { statefault = 1; - printk(KERN_WARNING MYNAM ": %s: Uh-oh, unexpected doorbell active!\n", + printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n", ioc->name); } @@ -1613,7 +1909,7 @@ */ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { statefault = 2; - printk(KERN_WARNING MYNAM ": %s: Uh-oh, IOC is in FAULT state!!!\n", + printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n", ioc->name); printk(KERN_WARNING " FAULT code = %04xh\n", ioc_state & MPI_DOORBELL_DATA_MASK); @@ -1623,28 +1919,49 @@ * Hmmm... Did it get left operational? */ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) { - statefault = 3; - dprintk((KERN_WARNING MYNAM ": %s: Hmmm... IOC operational unexpected\n", + dprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n", ioc->name)); + + /* Check WhoInit. + * If PCI Peer, exit. 
+ * Else, if no fault conditions are present, issue a MessageUnitReset + * Else, fall through to KickStart case + */ + whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT; + dprintk((KERN_WARNING MYNAM + ": whoinit 0x%x\n statefault %d force %d\n", + whoinit, statefault, force)); + if (whoinit == MPI_WHOINIT_PCI_PEER) + return -4; + else { + if ((statefault == 0 ) && (force == 0)) { + if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0) + return 0; + } + statefault = 3; + } } - hard_reset_done = KickStart(ioc, statefault||force); + hard_reset_done = KickStart(ioc, statefault||force, sleepFlag); if (hard_reset_done < 0) return -1; /* * Loop here waiting for IOC to come READY. */ - i = 0; + ii = 0; cntdn = HZ * 15; - while ((ioc_state = GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { + if (sleepFlag != CAN_SLEEP) + cntdn *= 10; /* 1500 iterations @ 1msec per */ + + while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { /* * BIOS or previous driver load left IOC in OP state. * Reset messaging FIFOs. */ - if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)) != 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - IOC msg unit reset failed!\n", ioc->name); + if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) { + printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name); return -2; } } else if (ioc_state == MPI_IOC_STATE_RESET) { @@ -1652,25 +1969,30 @@ * Something is wrong. Try to get IOC back * to a known state. 
*/ - if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET)) != 0) { - printk(KERN_ERR MYNAM ": %s: ERROR - IO unit reset failed!\n", ioc->name); + if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) { + printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name); return -3; } } - i++; cntdn--; + ii++; cntdn--; if (!cntdn) { - printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_READY state timeout(%d)!\n", - ioc->name, (i+5)/HZ); + printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", + ioc->name, (ii+5)/HZ); return -ETIME; } - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (1); /* 1 msec delay */ + } + } if (statefault < 3) { - printk(KERN_WARNING MYNAM ": %s: Whew! Recovered from %s\n", + printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name, statefault==1 ? "stuck handshake" : "IOC FAULT"); } @@ -1680,21 +2002,21 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * GetIocState - Get the current state of a MPT adapter. + * mpt_GetIocState - Get the current state of a MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @cooked: Request raw or cooked IOC state * * Returns all IOC Doorbell register bits if cooked==0, else just the * Doorbell bits in MPI_IOC_STATE_MASK. */ -static u32 -GetIocState(MPT_ADAPTER *ioc, int cooked) +u32 +mpt_GetIocState(MPT_ADAPTER *ioc, int cooked) { u32 s, sc; /* Get! */ s = CHIPREG_READ32(&ioc->chip->Doorbell); - dprintk((KERN_INFO MYNAM ": %s: raw state = %08x\n", ioc->name, s)); +// dprintk((MYIOC_s_INFO_FMT "raw state = %08x\n", ioc->name, s)); sc = s & MPI_IOC_STATE_MASK; /* Save! */ @@ -1707,11 +2029,13 @@ /* * GetIocFacts - Send IOCFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep + * @reason: If recovery, only update facts. 
* * Returns 0 for success, non-zero for failure. */ static int -GetIocFacts(MPT_ADAPTER *ioc) +GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) { IOCFacts_t get_facts; IOCFactsReply_t *facts; @@ -1741,14 +2065,13 @@ get_facts.Function = MPI_FUNCTION_IOC_FACTS; /* Assert: All other get_facts fields are zero! */ - dprintk((KERN_INFO MYNAM ": %s: Sending get IocFacts request\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "Sending get IocFacts request\n", ioc->name)); /* No non-zero fields in the get_facts request are greater than * 1 byte in size, so we can just fire it off as is. */ - r = HandShakeReqAndReply(ioc, - req_sz, (u32*)&get_facts, - reply_sz, (u16*)facts, 3); + r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts, + reply_sz, (u16*)facts, 3 /*seconds*/, sleepFlag); if (r != 0) return r; @@ -1761,14 +2084,17 @@ */ /* Did we get a valid reply? */ if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) { - /* - * If not been here, done that, save off first WhoInit value - */ - if (ioc->FirstWhoInit == WHOINIT_UNKNOWN) - ioc->FirstWhoInit = facts->WhoInit; + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + /* + * If not been here, done that, save off first WhoInit value + */ + if (ioc->FirstWhoInit == WHOINIT_UNKNOWN) + ioc->FirstWhoInit = facts->WhoInit; + } facts->MsgVersion = le16_to_cpu(facts->MsgVersion); facts->MsgContext = le32_to_cpu(facts->MsgContext); + facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); facts->IOCStatus = le16_to_cpu(facts->IOCStatus); facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo); status = facts->IOCStatus & MPI_IOCSTATUS_MASK; @@ -1776,7 +2102,23 @@ facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth); facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize); - facts->FWVersion = le16_to_cpu(facts->FWVersion); + + /* + * FC f/w version changed between 1.1 and 1.2 + * Old: u16{Major(4),Minor(4),SubMinor(8)} + * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} + */ + if 
(facts->MsgVersion < 0x0102) { + /* + * Handle old FC f/w style, convert to new... + */ + u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion); + facts->FWVersion.Word = + ((oldv<<12) & 0xFF000000) | + ((oldv<<8) & 0x000FFF00); + } else + facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); + facts->ProductID = le16_to_cpu(facts->ProductID); facts->CurrentHostMfaHighAddr = le32_to_cpu(facts->CurrentHostMfaHighAddr); @@ -1791,52 +2133,42 @@ * Older MPI-1.00.xx struct had 13 dwords, and enlarged * to 14 in MPI-1.01.0x. */ - if (facts->MsgLength >= sizeof(IOCFactsReply_t)/sizeof(u32) && facts->MsgVersion > 0x0100) { + if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && + facts->MsgVersion > 0x0100) { facts->FWImageSize = le32_to_cpu(facts->FWImageSize); - facts->DataImageSize = le32_to_cpu(facts->DataImageSize); } - if (facts->RequestFrameSize) { - /* - * Set values for this IOC's REQUEST queue size & depth... - */ - ioc->req_sz = MIN(MPT_REQ_SIZE, facts->RequestFrameSize * 4); - - /* - * Set values for this IOC's REPLY queue size & depth... - * - * BUG? FIX? 20000516 -nromer & sralston - * GRRR... The following did not translate well from MPI v0.09: - * ioc->reply_sz = MIN(MPT_REPLY_SIZE, facts->ReplySize * 4); - * to 0.10: - * ioc->reply_sz = MIN(MPT_REPLY_SIZE, facts->BlockSize * 4); - * Was trying to minimally optimize to smallest possible reply size - * (and greatly reduce kmalloc size). But LAN may need larger reply? - * - * So for now, just set reply size to request size. FIXME? - */ - ioc->reply_sz = ioc->req_sz; - } else { + if (!facts->RequestFrameSize) { /* Something is wrong! 
*/ - printk(KERN_ERR MYNAM ": %s: ERROR - IOC reported invalid 0 request size!\n", + printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n", ioc->name); - ioc->req_sz = MPT_REQ_SIZE; - ioc->reply_sz = MPT_REPLY_SIZE; return -55; } - ioc->req_depth = MIN(MPT_REQ_DEPTH, facts->GlobalCredits); - ioc->reply_depth = MIN(MPT_REPLY_DEPTH, facts->ReplyQueueDepth); - dprintk((KERN_INFO MYNAM ": %s: reply_sz=%3d, reply_depth=%4d\n", + if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { + /* + * Set values for this IOC's request & reply frame sizes, + * and request & reply queue depths... + */ + ioc->req_sz = MIN(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4); + ioc->req_depth = MIN(MPT_DEFAULT_REQ_DEPTH, facts->GlobalCredits); + ioc->reply_sz = ioc->req_sz; + ioc->reply_depth = MIN(MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth); + + /* 1030 - should we use a smaller DEFAULT_REPLY_DEPTH? + * FIX + */ + dprintk((MYIOC_s_INFO_FMT "reply_sz=%3d, reply_depth=%4d\n", ioc->name, ioc->reply_sz, ioc->reply_depth)); - dprintk((KERN_INFO MYNAM ": %s: req_sz =%3d, req_depth =%4d\n", + dprintk((MYIOC_s_INFO_FMT "req_sz =%3d, req_depth =%4d\n", ioc->name, ioc->req_sz, ioc->req_depth)); - /* Get port facts! */ - if ( (r = GetPortFacts(ioc, 0)) != 0 ) - return r; + /* Get port facts! */ + if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 ) + return r; + } } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Invalid IOC facts reply!\n", + printk(MYIOC_s_ERR_FMT "Invalid IOC facts reply!\n", ioc->name); return -66; } @@ -1849,15 +2181,16 @@ * GetPortFacts - Send PortFacts request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number + * @sleepFlag: Specifies whether the process can sleep * * Returns 0 for success, non-zero for failure. 
*/ static int -GetPortFacts(MPT_ADAPTER *ioc, int portnum) +GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag) { PortFacts_t get_pfacts; PortFactsReply_t *pfacts; - int i; + int ii; int req_sz; int reply_sz; @@ -1883,16 +2216,16 @@ get_pfacts.PortNumber = portnum; /* Assert: All other get_pfacts fields are zero! */ - dprintk((KERN_INFO MYNAM ": %s: Sending get PortFacts(%d) request\n", + dprintk((MYIOC_s_INFO_FMT "Sending get PortFacts(%d) request\n", ioc->name, portnum)); /* No non-zero fields in the get_pfacts request are greater than * 1 byte in size, so we can just fire it off as is. */ - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&get_pfacts, - reply_sz, (u16*)pfacts, 3); - if (i != 0) - return i; + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts, + reply_sz, (u16*)pfacts, 3 /*seconds*/, sleepFlag); + if (ii != 0) + return ii; /* Did we get a valid reply? */ @@ -1914,13 +2247,14 @@ /* * SendIocInit - Send IOCInit request to MPT adapter. * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep * * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state. * * Returns 0 for success, non-zero for failure. */ static int -SendIocInit(MPT_ADAPTER *ioc) +SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) { IOCInit_t ioc_init; MPIDefaultReply_t init_reply; @@ -1937,20 +2271,35 @@ ioc_init.Function = MPI_FUNCTION_IOC_INIT; /* ioc_init.Flags = 0; */ - /*ioc_init.MaxDevices = 16;*/ - ioc_init.MaxDevices = 255; -/* ioc_init.MaxBuses = 16; */ - ioc_init.MaxBuses = 1; + if ((int)ioc->chip_type <= (int)FC929) { + ioc_init.MaxDevices = MPT_MAX_FC_DEVICES; + } + else { + ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES; + } + ioc_init.MaxBuses = MPT_MAX_BUS; /* ioc_init.MsgFlags = 0; */ /* ioc_init.MsgContext = cpu_to_le32(0x00000000); */ ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ - ioc_init.HostMfaHighAddr = cpu_to_le32(0); /* Say we 32-bit! 
for now */ - dprintk((KERN_INFO MYNAM ": %s: Sending IOCInit (req @ %p)\n", ioc->name, &ioc_init)); +#ifdef __ia64__ + /* Save the upper 32-bits of the request + * (reply) and sense buffers. + */ + ioc_init.HostMfaHighAddr = cpu_to_le32((u32)(ioc->req_frames_dma >> 32)); + ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)(ioc->sense_buf_pool_dma >> 32)); +#else + /* Force 32-bit addressing */ + ioc_init.HostMfaHighAddr = cpu_to_le32(0); + ioc_init.SenseBufferHighAddr = cpu_to_le32(0); +#endif + + dprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n", + ioc->name, &ioc_init)); - r = HandShakeReqAndReply(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, - sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10); + r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, + sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); if (r != 0) return r; @@ -1958,7 +2307,7 @@ * since we don't even look at it's contents. */ - if ((r = SendPortEnable(ioc, 0)) != 0) + if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) return r; /* YIKES! SUPER IMPORTANT!!! 
@@ -1967,21 +2316,27 @@ */ count = 0; cntdn = HZ * 60; /* chg'd from 30 to 60 seconds */ - state = GetIocState(ioc, 1); + if (sleepFlag != CAN_SLEEP) + cntdn *= 10; /* scale for 1msec delays */ + state = mpt_GetIocState(ioc, 1); while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay(1); + } if (!cntdn) { - printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_OP state timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n", ioc->name, (count+5)/HZ); return -9; } - state = GetIocState(ioc, 1); + state = mpt_GetIocState(ioc, 1); count++; } - dhsprintk((KERN_INFO MYNAM ": %s: INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", ioc->name, count)); return r; @@ -1992,17 +2347,18 @@ * SendPortEnable - Send PortEnable request to MPT adapter port. * @ioc: Pointer to MPT_ADAPTER structure * @portnum: Port number to enable + * @sleepFlag: Specifies whether the process can sleep * * Send PortEnable to bring IOC to OPERATIONAL state. * * Returns 0 for success, non-zero for failure. 
*/ static int -SendPortEnable(MPT_ADAPTER *ioc, int portnum) +SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag) { PortEnable_t port_enable; MPIDefaultReply_t reply_buf; - int i; + int ii; int req_sz; int reply_sz; @@ -2019,13 +2375,21 @@ /* port_enable.MsgFlags = 0; */ /* port_enable.MsgContext = 0; */ - dprintk((KERN_INFO MYNAM ": %s: Sending Port(%d)Enable (req @ %p)\n", + dprintk((MYIOC_s_INFO_FMT "Sending Port(%d)Enable (req @ %p)\n", ioc->name, portnum, &port_enable)); - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&port_enable, - reply_sz, (u16*)&reply_buf, 65); - if (i != 0) - return i; + /* RAID FW may take a long time to enable + */ + if ((int)ioc->chip_type <= (int)FC929) { + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, + reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag); + } else { + ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, + reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag); + } + + if (ii != 0) + return ii; /* We do not even look at the reply, so we need not * swap the multi-byte fields. @@ -2036,19 +2400,341 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* + * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port. + * @ioc: Pointer to MPT_ADAPTER structure + * @sleepFlag: Specifies whether the process can sleep + * + * Returns 0 for success, >0 for handshake failure + * <0 for fw upload failure. + * + * Remark: If bound IOC and a successful FWUpload was performed + * on the bound IOC, the second image is discarded + * and memory is free'd. Both channels must upload to prevent + * IOC from running in degraded mode. 
+ */ +static int +mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) +{ + u8 request[sizeof(FWUpload_t) + 24]; + u8 reply[sizeof(FWUploadReply_t)]; + FWUpload_t *prequest; + FWUploadReply_t *preply; + FWUploadTCSGE_t *ptcsge = NULL; + MptSge_t *psge; + u8 *mem; + dma_addr_t dma_addr; + int sgeoffset; + int i, sz, req_sz, reply_sz; + int cmdStatus, freeMem = 0; + + /* If the image size is 0 or if the pointer is + * not NULL (error), we are done. + */ + if (((sz = ioc->facts.FWImageSize) == 0) || ioc->FWImage) + return 0; + + /* Allocate memory + */ + mem = pci_alloc_consistent(ioc->pcidev, sz, &ioc->FWImage_dma); + if (mem == NULL) + return -1; + + memset(mem, 0, sz); + ioc->alloc_total += sz; + ioc->FWImage = mem; + dprintk((KERN_INFO MYNAM ": FW Image @ %p[%p], sz=%d bytes\n", + mem, (void *)(ulong)ioc->FWImage_dma, sz)); + + dma_addr = ioc->FWImage_dma; + + prequest = (FWUpload_t *)&request; + preply = (FWUploadReply_t *)&reply; + + /* Destination... */ + req_sz = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + + sizeof(FWUploadTCSGE_t) + sizeof(MptSge_t); + memset(prequest, 0, req_sz); + + reply_sz = sizeof(reply); + memset(preply, 0, reply_sz); + + prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; + prequest->Function = MPI_FUNCTION_FW_UPLOAD; + prequest->MsgContext = 0; /* anything */ + + ptcsge = (FWUploadTCSGE_t *) &prequest->SGL; + ptcsge->Reserved = 0; + ptcsge->ContextSize = 0; + ptcsge->DetailsLength = 12; + ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; + ptcsge->Reserved1 = 0; + ptcsge->ImageOffset = 0; + ptcsge->ImageSize = cpu_to_le32(sz); + + sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t); + psge = (MptSge_t *) &request[sgeoffset]; + psge->FlagsLength = cpu_to_le32(MPT_SGE_FLAGS_SSIMPLE_READ | (u32) sz); + + cpu_to_leXX(dma_addr, psge->Address); + + dprintk((MYIOC_s_INFO_FMT "Sending FW Upload (req @ %p)\n", + ioc->name, prequest)); + + i = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)prequest, + reply_sz, 
(u16*)preply, 65 /*seconds*/, sleepFlag); + + cmdStatus = -EFAULT; + if (i == 0) { + /* Handshake transfer was complete and successful. + * Check the Reply Frame. + */ + int status, transfer_sz; + status = le16_to_cpu(preply->IOCStatus); + if (status == MPI_IOCSTATUS_SUCCESS) { + transfer_sz = le32_to_cpu(preply->ActualImageSize); + if (transfer_sz == sz) + cmdStatus = 0; + } + } + dprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n", + ioc->name, cmdStatus)); + + /* Check to see if we have a copy of this image in + * host memory already. + */ + if (cmdStatus == 0) { + if (ioc->alt_ioc && ioc->alt_ioc->FWImage) + freeMem = 1; + } + + /* We already have a copy of this image or + * we had some type of an error - either the handshake + * failed (i != 0) or the command did not complete successfully. + */ + if (cmdStatus || freeMem) { + dprintk((MYIOC_s_INFO_FMT ": do_upload freeing %s image \n", + ioc->name, cmdStatus ? "incomplete" : "duplicate")); + + pci_free_consistent(ioc->pcidev, sz, + ioc->FWImage, ioc->FWImage_dma); + ioc->FWImage = NULL; + ioc->alloc_total -= sz; + } + + return cmdStatus; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mpt_downloadboot - DownloadBoot code + * @ioc: Pointer to MPT_ADAPTER structure + * @flag: Specify which part of IOC memory is to be uploaded. + * @sleepFlag: Specifies whether the process can sleep + * + * FwDownloadBoot requires Programmed IO access. + * + * Returns 0 for success + * -1 FW Image size is 0 + * -2 No valid FWImage Pointer + * <0 for fw upload failure. 
+ */ +static int +mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag) +{ + MpiFwHeader_t *FwHdr = NULL; + MpiExtImageHeader_t *ExtHdr; + int fw_sz; + u32 diag0val; +#ifdef MPT_DEBUG + u32 diag1val = 0; +#endif + int count = 0; + u32 *ptru32 = NULL; + u32 diagRwData; + u32 nextImage; + + dprintk((MYIOC_s_INFO_FMT "DbGb0: downloadboot entered.\n", + ioc->name)); +#ifdef MPT_DEBUG + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb1: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + dprintk((MYIOC_s_INFO_FMT "fw size 0x%x, ioc FW Ptr %p\n", + ioc->name, ioc->facts.FWImageSize, ioc->FWImage)); + if (ioc->alt_ioc) + dprintk((MYIOC_s_INFO_FMT "alt ioc FW Ptr %p\n", + ioc->name, ioc->alt_ioc->FWImage)); + + /* Get dma_addr and data transfer size. + */ + if ((fw_sz = ioc->facts.FWImageSize) == 0) + return -1; + + /* Get the DMA from ioc or ioc->alt_ioc */ + if (ioc->FWImage) + FwHdr = (MpiFwHeader_t *)ioc->FWImage; + else if (ioc->alt_ioc && ioc->alt_ioc->FWImage) + FwHdr = (MpiFwHeader_t *)ioc->alt_ioc->FWImage; + + dprintk((MYIOC_s_INFO_FMT "DbGb2: FW Image @ %p\n", + ioc->name, FwHdr)); + + if (!FwHdr) + return -2; + + /* Write magic sequence to WriteSequence register + * until enter diagnostic mode + */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + while ((diag0val & MPI_DIAG_DRWE) == 0) { + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); + + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + + count++; + if (count 
> 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + return -EFAULT; + + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n", + ioc->name, diag0val)); + } + + /* Set the DiagRwEn and Disable ARM bits */ + diag0val |= (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbGb3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + /* Write the LoadStartAddress to the DiagRw Address Register + * using Programmed IO + */ + + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, FwHdr->LoadStartAddress); + dprintk((MYIOC_s_INFO_FMT "LoadStart addr written 0x%x \n", + ioc->name, FwHdr->LoadStartAddress)); + + nextImage = FwHdr->NextImageHeaderOffset; + + /* round up count to a 32bit alignment */ + ptru32 = (u32 *) FwHdr; + count = (FwHdr->ImageSize + 3)/4; + + dprintk((MYIOC_s_INFO_FMT "Write FW Image: 0x%x u32's @ %p\n", + ioc->name, count, ptru32)); + while (count-- ) { + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptru32); + ptru32++; + } + + dprintk((MYIOC_s_INFO_FMT "FW Image done! 
\n", ioc->name)); + + while (nextImage) { + + /* Set the pointer to the extended image + */ + ExtHdr = (MpiExtImageHeader_t *) ((char *) FwHdr + nextImage); + + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, ExtHdr->LoadStartAddress); + + count = (ExtHdr->ImageSize + 3 )/4; + + ptru32 = (u32 *) ExtHdr; + dprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x u32's @ %p\n", + ioc->name, count, ptru32)); + while (count-- ) { + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptru32); + ptru32++; + } + nextImage = ExtHdr->NextImageHeaderOffset; + } + + + /* Write the IopResetVectorRegAddr */ + dprintk((MYIOC_s_INFO_FMT "Write IopResetVector Addr! \n", ioc->name)); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, FwHdr->IopResetRegAddr); + + /* Write the IopResetVectorValue */ + dprintk((MYIOC_s_INFO_FMT "Write IopResetVector Value! \n", ioc->name)); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, FwHdr->IopResetVectorValue); + + /* Clear the internal flash bad bit - autoincrementing register, + * so must do two writes. + */ + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); + diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); + diagRwData |= 0x4000000; + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); + CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); + + /* clear the RW enable and DISARM bits */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + diag0val &= ~(MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE | MPI_DIAG_FLASH_BAD_SIG); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + + /* Write 0xFF to reset the sequencer */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * KickStart - Perform hard reset of MPT adapter. 
* @ioc: Pointer to MPT_ADAPTER structure * @force: Force hard reset + * @sleepFlag: Specifies whether the process can sleep * * This routine places MPT adapter in diagnostic mode via the * WriteSequence register, and then performs a hard reset of adapter * via the Diagnostic register. * - * Returns 0 for soft reset success, 1 for hard reset success, - * else a negative value for failure. + * Inputs: sleepflag - CAN_SLEEP (non-interrupt thread) + * or NO_SLEEP (interrupt thread, use mdelay) + * force - 1 if doorbell active, board fault state + * board operational, IOC_RECOVERY or + * IOC_BRINGUP and there is an alt_ioc. + * 0 else + * + * Returns: + * 1 - hard reset, READY + * 0 - no reset due to History bit, READY + * -1 - no reset due to History bit but not READY + * OR reset but failed to come READY + * -2 - no reset, could not enter DIAG mode + * -3 - reset but bad FW bit */ static int -KickStart(MPT_ADAPTER *ioc, int force) +KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag) { int hard_reset_done = 0; u32 ioc_state; @@ -2056,183 +2742,295 @@ dprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name)); - hard_reset_done = mpt_fc9x9_reset(ioc, force); -#if 0 - if (ioc->chip_type == FC909 || ioc->chip-type == FC919) { - hard_reset_done = mpt_fc9x9_reset(ioc, force); - } else if (ioc->chip_type == FC929) { - unsigned long delta; - - delta = jiffies - ioc->last_kickstart; - dprintk((KERN_INFO MYNAM ": %s: 929 KickStart, last=%ld, delta = %ld\n", - ioc->name, ioc->last_kickstart, delta)); - if ((ioc->sod_reset == 0) || (delta >= 10*HZ)) - hard_reset_done = mpt_fc9x9_reset(ioc, ignore); - else { - dprintk((KERN_INFO MYNAM ": %s: Skipping KickStart (delta=%ld)!\n", - ioc->name, delta)); - return 0; - } - /* TODO! Add C1030! 
- } else if (ioc->chip_type == C1030) { - */ - } else { - printk(KERN_ERR MYNAM ": %s: ERROR - Bad chip_type (0x%x)\n", - ioc->name, ioc->chip_type); - return -5; - } -#endif - + hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag); if (hard_reset_done < 0) return hard_reset_done; - dprintk((KERN_INFO MYNAM ": %s: Diagnostic reset successful\n", + dprintk((MYIOC_s_INFO_FMT "Diagnostic reset successful!\n", ioc->name)); for (cnt=0; cntname, cnt)); return hard_reset_done; } - /* udelay(10000) ? */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (10); + } } - printk(KERN_ERR MYNAM ": %s: ERROR - Failed to come READY after reset!\n", + printk(MYIOC_s_ERR_FMT "Failed to come READY after reset!\n", ioc->name); return -1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * mpt_fc9x9_reset - Perform hard reset of FC9x9 adapter. + * mpt_diag_reset - Perform hard reset of the adapter. * @ioc: Pointer to MPT_ADAPTER structure - * - * This routine places FC9x9 adapter in diagnostic mode via the - * WriteSequence register, and then performs a hard reset of adapter - * via the Diagnostic register. - * - * Returns 0 for success, non-zero for failure. + * @ignore: Set if to honor and clear to ignore + * the reset history bit + * @sleepflag: CAN_SLEEP if called in a non-interrupt thread, + * else set to NO_SLEEP (use mdelay instead) + * + * This routine places the adapter in diagnostic mode via the + * WriteSequence register and then performs a hard reset of adapter + * via the Diagnostic register. Adapter should be in ready state + * upon successful completion. 
+ * + * Returns: 1 hard reset successful + * 0 no reset performed because reset history bit set + * -2 enabling diagnostic mode failed + * -3 diagnostic reset failed */ static int -mpt_fc9x9_reset(MPT_ADAPTER *ioc, int ignore) +mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) { u32 diag0val; + u32 doorbell; int hard_reset_done = 0; + int count = 0; +#ifdef MPT_DEBUG + u32 diag1val = 0; +#endif - /* Use "Diagnostic reset" method! (only thing available!) */ + /* Clear any existing interrupts */ + CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); + /* Use "Diagnostic reset" method! (only thing available!) */ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; if (ioc->alt_ioc) diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DBG1: diag0=%08x, diag1=%08x\n", + dprintk((MYIOC_s_INFO_FMT "DbG1: diag0=%08x, diag1=%08x\n", ioc->name, diag0val, diag1val)); -} #endif - if (diag0val & MPI_DIAG_DRWE) { - dprintk((KERN_INFO MYNAM ": %s: DiagWriteEn bit already set\n", - ioc->name)); - } else { - /* Write magic sequence to WriteSequence register */ - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); - CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); - dprintk((KERN_INFO MYNAM ": %s: Wrote magic DiagWriteEn sequence [spot#1]\n", - ioc->name)); - } - diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + /* Do the reset if we are told to ignore the reset history + * or if the reset history is 0 + */ + if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) { + while ((diag0val & MPI_DIAG_DRWE) == 0) { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 
MPI_WRSEQ_1ST_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); + CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); + + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + + count++; + if (count > 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + return -2; + + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + + dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n", + ioc->name, diag0val)); + } + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG2: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); -} + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbG2: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); #endif - if (!ignore && (diag0val & MPI_DIAG_RESET_HISTORY)) { - dprintk((KERN_INFO MYNAM ": %s: Skipping due to ResetHistory bit set!\n", - ioc->name)); - } else { + /* Write the PreventIocBoot bit */ + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + diag0val |= MPI_DIAG_PREVENT_IOC_BOOT; + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); + } + + /* + * Disable the ARM (Bug fix) + * + */ + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM); + mdelay (1); + /* * Now hit the reset bit in the Diagnostic register - * (THE BIG HAMMER!) + * (THE BIG HAMMER!) (Clears DRWE bit). 
*/ - CHIPREG_WRITE32(&ioc->chip->Diagnostic, MPI_DIAG_RESET_ADAPTER); + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER); hard_reset_done = 1; - dprintk((KERN_INFO MYNAM ": %s: Diagnostic reset performed\n", + dprintk((MYIOC_s_INFO_FMT "Diagnostic reset performed\n", ioc->name)); - /* want udelay(100) */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + /* + * Call each currently registered protocol IOC reset handler + * with pre-reset indication. + * NOTE: If we're doing _IOC_BRINGUP, there can be no + * MptResetHandlers[] registered yet. + */ + { + int ii; + int r = 0; + + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptResetHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n", + ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET); + if (ioc->alt_ioc) { + dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n", + ioc->name, ioc->alt_ioc->name, ii)); + r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); + } + } + } + /* FIXME? Examine results here? */ + } + + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + /* If the DownloadBoot operation fails, the + * IOC will be left unusable. This is a fatal error + * case. 
_diag_reset will return < 0 + */ + for (count = 0; count < 30; count ++) { + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT + "DbG2b: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) { + break; + } - /* Write magic sequence to WriteSequence register */ + /* wait 1 sec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } else { + mdelay (1000); + } + } + if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) { + printk(KERN_WARNING MYNAM + ": firmware downloadboot failure (%d)!\n", count); + } + + } else { + /* Wait for FW to reload and for board + * to go to the READY state. + * Maximum wait is 30 seconds. + * If fail, no error will check again + * with calling program. + */ + for (count = 0; count < 30; count ++) { + doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); + doorbell &= MPI_IOC_STATE_MASK; + + if (doorbell == MPI_IOC_STATE_READY) { + break; + } + + /* wait 1 sec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } else { + mdelay (1000); + } + } + } + } + + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); +#ifdef MPT_DEBUG + if (ioc->alt_ioc) + diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); + dprintk((MYIOC_s_INFO_FMT "DbG3: diag0=%08x, diag1=%08x\n", + ioc->name, diag0val, diag1val)); +#endif + + /* Clear RESET_HISTORY bit! Place board in the + * diagnostic mode to update the diag register. 
+ */ + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + count = 0; + while ((diag0val & MPI_DIAG_DRWE) == 0) { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE); CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE); - dprintk((KERN_INFO MYNAM ": %s: Wrote magic DiagWriteEn sequence [spot#2]\n", - ioc->name)); - } - /* Clear RESET_HISTORY bit! */ - CHIPREG_WRITE32(&ioc->chip->Diagnostic, 0x0); + /* wait 100 msec */ + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(100 * HZ / 1000); + } else { + mdelay (100); + } + count++; + if (count > 20) { + printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n", + ioc->name, diag0val); + break; + } + diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + } + diag0val &= ~MPI_DIAG_RESET_HISTORY; + CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); -#ifdef MPT_DEBUG -{ - u32 diag1val = 0; - if (ioc->alt_ioc) - diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG3: diag0=%08x, diag1=%08x\n", - ioc->name, diag0val, diag1val)); -} -#endif if (diag0val & MPI_DIAG_RESET_HISTORY) { - printk(KERN_WARNING MYNAM ": %s: WARNING - ResetHistory bit failed to clear!\n", + printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n", ioc->name); } + /* Disable Diagnostic Mode + */ + CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF); + + /* Check FW reload status flags. + */ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); + if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) { + printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! 
(%02xh)\n", + ioc->name, diag0val); + return -3; + } + #ifdef MPT_DEBUG -{ - u32 diag1val = 0; if (ioc->alt_ioc) diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic); - dprintk((KERN_INFO MYNAM ": %s: DbG4: diag0=%08x, diag1=%08x\n", + dprintk((MYIOC_s_INFO_FMT "DbG4: diag0=%08x, diag1=%08x\n", ioc->name, diag0val, diag1val)); -} #endif - if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) { - printk(KERN_ERR MYNAM ": %s: ERROR - Diagnostic reset FAILED! (%02xh)\n", - ioc->name, diag0val); - return -3; - } /* * Reset flag that says we've enabled event notification */ ioc->facts.EventState = 0; - /* NEW! 20010220 -sralston - * Try to avoid redundant resets of the 929. - */ - ioc->sod_reset++; - ioc->last_kickstart = jiffies; - if (ioc->alt_ioc) { - ioc->alt_ioc->sod_reset = ioc->sod_reset; - ioc->alt_ioc->last_kickstart = ioc->last_kickstart; - } + if (ioc->alt_ioc) + ioc->alt_ioc->facts.EventState = 0; return hard_reset_done; } @@ -2249,16 +3047,45 @@ * Returns 0 for success, non-zero for failure. */ static int -SendIocReset(MPT_ADAPTER *ioc, u8 reset_type) +SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) { int r; + u32 state; + int cntdn, count; dprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n", ioc->name, reset_type)); CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<name, (count+5)/HZ); + return -ETIME; + } + + if (sleepFlag == CAN_SLEEP) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + } else { + mdelay (1); /* 1 msec delay */ + } + } + /* TODO! * Cleanup all event stuff for this IOC; re-issue EventNotification * request if needed. @@ -2275,7 +3102,8 @@ * @ioc: Pointer to MPT_ADAPTER structure * * This routine allocates memory for the MPT reply and request frame - * pools, and primes the IOC reply FIFO with reply frames. + * pools (if necessary), and primes the IOC reply FIFO with + * reply frames. * * Returns 0 for success, non-zero for failure. 
*/ @@ -2284,6 +3112,7 @@ { MPT_FRAME_HDR *mf; unsigned long b; + unsigned long flags; dma_addr_t aligned_mem_dma; u8 *mem, *aligned_mem; int i, sz; @@ -2299,8 +3128,8 @@ memset(mem, 0, sz); ioc->alloc_total += sz; ioc->reply_alloc = mem; - dprintk((KERN_INFO MYNAM ": %s.reply_alloc @ %p[%08x], sz=%d bytes\n", - ioc->name, mem, ioc->reply_alloc_dma, sz)); + dprintk((KERN_INFO MYNAM ": %s.reply_alloc @ %p[%p], sz=%d bytes\n", + ioc->name, mem, (void *)(ulong)ioc->reply_alloc_dma, sz)); b = (unsigned long) mem; b = (b + (0x80UL - 1UL)) & ~(0x80UL - 1UL); /* round up to 128-byte boundary */ @@ -2308,15 +3137,20 @@ ioc->reply_frames = (MPT_FRAME_HDR *) aligned_mem; ioc->reply_frames_dma = (ioc->reply_alloc_dma + (aligned_mem - mem)); - aligned_mem_dma = ioc->reply_frames_dma; - dprintk((KERN_INFO MYNAM ": %s.reply_frames @ %p[%08x]\n", - ioc->name, aligned_mem, aligned_mem_dma)); - - for (i = 0; i < ioc->reply_depth; i++) { - /* Write each address to the IOC! */ - CHIPREG_WRITE32(&ioc->chip->ReplyFifo, aligned_mem_dma); - aligned_mem_dma += ioc->reply_sz; - } + + ioc->reply_frames_low_dma = (u32) (ioc->reply_frames_dma & 0xFFFFFFFF); + } + + /* Post Reply frames to FIFO + */ + aligned_mem_dma = ioc->reply_frames_dma; + dprintk((KERN_INFO MYNAM ": %s.reply_frames @ %p[%p]\n", + ioc->name, ioc->reply_frames, (void *)(ulong)aligned_mem_dma)); + + for (i = 0; i < ioc->reply_depth; i++) { + /* Write each address to the IOC! 
*/ + CHIPREG_WRITE32(&ioc->chip->ReplyFifo, aligned_mem_dma); + aligned_mem_dma += ioc->reply_sz; } @@ -2336,8 +3170,8 @@ memset(mem, 0, sz); ioc->alloc_total += sz; ioc->req_alloc = mem; - dprintk((KERN_INFO MYNAM ": %s.req_alloc @ %p[%08x], sz=%d bytes\n", - ioc->name, mem, ioc->req_alloc_dma, sz)); + dprintk((KERN_INFO MYNAM ": %s.req_alloc @ %p[%p], sz=%d bytes\n", + ioc->name, mem, (void *)(ulong)ioc->req_alloc_dma, sz)); b = (unsigned long) mem; b = (b + (0x80UL - 1UL)) & ~(0x80UL - 1UL); /* round up to 128-byte boundary */ @@ -2345,18 +3179,18 @@ ioc->req_frames = (MPT_FRAME_HDR *) aligned_mem; ioc->req_frames_dma = (ioc->req_alloc_dma + (aligned_mem - mem)); - aligned_mem_dma = ioc->req_frames_dma; - dprintk((KERN_INFO MYNAM ": %s.req_frames @ %p[%08x]\n", - ioc->name, aligned_mem, aligned_mem_dma)); + ioc->req_frames_low_dma = (u32) (ioc->req_frames_dma & 0xFFFFFFFF); - for (i = 0; i < ioc->req_depth; i++) { - mf = (MPT_FRAME_HDR *) aligned_mem; - - /* Queue REQUESTs *internally*! */ - Q_ADD_TAIL(&ioc->FreeQ.head, &mf->u.frame.linkage, MPT_FRAME_HDR); - aligned_mem += ioc->req_sz; +#ifdef __ia64__ + /* Check: upper 32-bits of the request and reply frame + * physical addresses must be the same. 
+ * ia64 check only + */ + if ((ioc->req_frames_dma >> 32) != (ioc->reply_frames_dma >> 32)){ + goto out_fail; } +#endif #if defined(CONFIG_MTRR) && 0 /* @@ -2367,20 +3201,38 @@ ioc->mtrr_reg = mtrr_add(ioc->req_alloc_dma, sz, MTRR_TYPE_WRCOMB, 1); - dprintk((KERN_INFO MYNAM ": %s: MTRR region registered (base:size=%08x:%x)\n", - ioc->name, ioc->req_alloc_dma, - sz )); + dprintk((MYIOC_s_INFO_FMT "MTRR region registered (base:size=%08x:%x)\n", + ioc->name, ioc->req_alloc_dma, sz)); #endif + } + /* Initialize Request frames linked list + */ + aligned_mem_dma = ioc->req_frames_dma; + aligned_mem = (u8 *) ioc->req_frames; + dprintk((KERN_INFO MYNAM ": %s.req_frames @ %p[%p]\n", + ioc->name, aligned_mem, (void *)(ulong)aligned_mem_dma)); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_INIT(&ioc->FreeQ, MPT_FRAME_HDR); + for (i = 0; i < ioc->req_depth; i++) { + mf = (MPT_FRAME_HDR *) aligned_mem; + + /* Queue REQUESTs *internally*! */ + Q_ADD_TAIL(&ioc->FreeQ.head, &mf->u.frame.linkage, MPT_FRAME_HDR); + aligned_mem += ioc->req_sz; } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + if (ioc->sense_buf_pool == NULL) { - sz = (ioc->req_depth * 256); + sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); ioc->sense_buf_pool = pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma); if (ioc->sense_buf_pool == NULL) goto out_fail; + ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF); ioc->alloc_total += sz; } @@ -2408,7 +3260,7 @@ #if defined(CONFIG_MTRR) && 0 if (ioc->mtrr_reg > 0) { mtrr_del(ioc->mtrr_reg, 0, 0); - dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", + dprintk((MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name)); } #endif @@ -2417,7 +3269,7 @@ ioc->alloc_total -= sz; } if (ioc->sense_buf_pool != NULL) { - sz = (ioc->req_depth * 256); + sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC); pci_free_consistent(ioc->pcidev, sz, ioc->sense_buf_pool, ioc->sense_buf_pool_dma); @@ -2427,8 +3279,8 @@ } 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/* - * HandShakeReqAndReply - Send MPT request to and receive reply from +/** + * mpt_handshake_req_reply_wait - Send MPT request to and receive reply from * IOC via doorbell handshake method. * @ioc: Pointer to MPT_ADAPTER structure * @reqBytes: Size of the request in bytes @@ -2436,6 +3288,7 @@ * @replyBytes: Expected size of the reply in bytes * @u16reply: Pointer to area where reply should be written * @maxwait: Max wait time for a reply (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * NOTES: It is the callers responsibility to byte-swap fields in the * request which are greater than 1 byte in size. It is also the @@ -2444,8 +3297,9 @@ * * Returns 0 for success, non-zero for failure. */ -static int -HandShakeReqAndReply(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait) +int +mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, + int replyBytes, u16 *u16reply, int maxwait, int sleepFlag) { MPIDefaultReply_t *mptReply; int failcnt = 0; @@ -2471,57 +3325,61 @@ /* * Wait for IOC's doorbell handshake int */ - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) failcnt++; - dhsprintk((KERN_INFO MYNAM ": %s: HandShake request start, WaitCnt=%d%s\n", + dhsprintk((MYIOC_s_INFO_FMT "HandShake request start, WaitCnt=%d%s\n", ioc->name, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); + /* Read doorbell and check for active bit */ + if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE)) + return -1; + /* * Clear doorbell int (WRITE 0 to IntStatus reg), * then wait for IOC to ACKnowledge that it's ready for * our handshake request. 
*/ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); - if (!failcnt && (t = WaitForDoorbellAck(ioc, 2)) < 0) + if (!failcnt && (t = WaitForDoorbellAck(ioc, 2, sleepFlag)) < 0) failcnt++; if (!failcnt) { - int i; + int ii; u8 *req_as_bytes = (u8 *) req; /* * Stuff request words via doorbell handshake, * with ACK from IOC for each. */ - for (i = 0; !failcnt && i < reqBytes/4; i++) { - u32 word = ((req_as_bytes[(i*4) + 0] << 0) | - (req_as_bytes[(i*4) + 1] << 8) | - (req_as_bytes[(i*4) + 2] << 16) | - (req_as_bytes[(i*4) + 3] << 24)); + for (ii = 0; !failcnt && ii < reqBytes/4; ii++) { + u32 word = ((req_as_bytes[(ii*4) + 0] << 0) | + (req_as_bytes[(ii*4) + 1] << 8) | + (req_as_bytes[(ii*4) + 2] << 16) | + (req_as_bytes[(ii*4) + 3] << 24)); CHIPREG_WRITE32(&ioc->chip->Doorbell, word); - if ((t = WaitForDoorbellAck(ioc, 2)) < 0) + if ((t = WaitForDoorbellAck(ioc, 2, sleepFlag)) < 0) failcnt++; } dmfprintk((KERN_INFO MYNAM ": Handshake request frame (@%p) header\n", req)); DBG_DUMP_REQUEST_FRAME_HDR(req) - dhsprintk((KERN_INFO MYNAM ": %s: HandShake request post done, WaitCnt=%d%s\n", + dhsprintk((MYIOC_s_INFO_FMT "HandShake request post done, WaitCnt=%d%s\n", ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : "")); /* * Wait for completion of doorbell handshake reply from the IOC */ - if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait)) < 0) + if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0) failcnt++; /* * Copy out the cached reply... */ - for(i=0; i < MIN(replyBytes/2,mptReply->MsgLength*2); i++) - u16reply[i] = ioc->hs_reply[i]; + for (ii=0; ii < MIN(replyBytes/2,mptReply->MsgLength*2); ii++) + u16reply[ii] = ioc->hs_reply[ii]; } else { return -99; } @@ -2535,6 +3393,7 @@ * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine waits (up to ~2 seconds max) for IOC doorbell * handshake ACKnowledge. 
@@ -2542,28 +3401,40 @@ * Returns a negative value on failure, else wait loop count. */ static int -WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int cntdn = HZ * howlong; int count = 0; u32 intstat; - while (--cntdn) { - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) - break; - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - count++; + if (sleepFlag == CAN_SLEEP) { + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + count++; + } + } else { + cntdn *= 10; /* convert to msec */ + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS)) + break; + mdelay (1); + count++; + } + count /= 10; } if (cntdn) { - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell ACK (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell ACK (cnt=%d)\n", ioc->name, count)); return count; } - printk(KERN_ERR MYNAM ": %s: ERROR - Doorbell ACK timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout(%d)!\n", ioc->name, (count+5)/HZ); return -1; } @@ -2574,34 +3445,47 @@ * in it's IntStatus register. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt. * * Returns a negative value on failure, else wait loop count. 
*/ static int -WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int cntdn = HZ * howlong; int count = 0; u32 intstat; - while (--cntdn) { - intstat = CHIPREG_READ32(&ioc->chip->IntStatus); - if (intstat & MPI_HIS_DOORBELL_INTERRUPT) - break; - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - count++; + if (sleepFlag == CAN_SLEEP) { + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (intstat & MPI_HIS_DOORBELL_INTERRUPT) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + count++; + } + } else { + cntdn *= 10; /* convert to msec */ + while (--cntdn) { + intstat = CHIPREG_READ32(&ioc->chip->IntStatus); + if (intstat & MPI_HIS_DOORBELL_INTERRUPT) + break; + mdelay(1); + count++; + } + count /= 10; } if (cntdn) { - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell INT (cnt=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell INT (cnt=%d)\n", ioc->name, count)); return count; } - printk(KERN_ERR MYNAM ": %s: ERROR - Doorbell INT timeout(%d)!\n", + printk(MYIOC_s_ERR_FMT "Doorbell INT timeout(%d)!\n", ioc->name, (count+5)/HZ); return -1; } @@ -2611,6 +3495,7 @@ * WaitForDoorbellReply - Wait for and capture a IOC handshake reply. * @ioc: Pointer to MPT_ADAPTER structure * @howlong: How long to wait (in seconds) + * @sleepFlag: Specifies whether the process can sleep * * This routine polls the IOC for a handshake reply, 16 bits at a time. * Reply is cached to IOC private area large enough to hold a maximum @@ -2619,13 +3504,13 @@ * Returns a negative value on failure, else size of reply in WORDS. 
*/ static int -WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong) +WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag) { int u16cnt = 0; int failcnt = 0; int t; u16 *hs_reply = ioc->hs_reply; - volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply; + volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply; u16 hword; hs_reply[0] = hs_reply[1] = hs_reply[7] = 0; @@ -2634,12 +3519,12 @@ * Get first two u16's so we can look at IOC's intended reply MsgLength */ u16cnt=0; - if ((t = WaitForDoorbellInt(ioc, howlong)) < 0) { + if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) { failcnt++; } else { hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; else { hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); @@ -2647,7 +3532,7 @@ } } - dhsprintk((KERN_INFO MYNAM ": %s: First handshake reply word=%08x%s\n", + dhsprintk((MYIOC_s_INFO_FMT "First handshake reply word=%08x%s\n", ioc->name, le32_to_cpu(*(u32 *)hs_reply), failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); @@ -2656,7 +3541,7 @@ * reply 16 bits at a time. */ for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) { - if ((t = WaitForDoorbellInt(ioc, 2)) < 0) + if ((t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF); /* don't overflow our IOC hs_reply[] buffer! 
*/ @@ -2665,12 +3550,12 @@ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); } - if (!failcnt && (t = WaitForDoorbellInt(ioc, 2)) < 0) + if (!failcnt && (t = WaitForDoorbellInt(ioc, 2, sleepFlag)) < 0) failcnt++; CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); if (failcnt) { - printk(KERN_ERR MYNAM ": %s: ERROR - Handshake reply failure!\n", + printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n", ioc->name); return -failcnt; } @@ -2683,10 +3568,10 @@ } #endif - dmfprintk((KERN_INFO MYNAM ": %s: Got Handshake reply:\n", ioc->name)); + dmfprintk((MYIOC_s_INFO_FMT "Got Handshake reply:\n", ioc->name)); DBG_DUMP_REPLY_FRAME(mptReply) - dhsprintk((KERN_INFO MYNAM ": %s: WaitForDoorbell REPLY (sz=%d)\n", + dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell REPLY (sz=%d)\n", ioc->name, u16cnt/2)); return u16cnt/2; } @@ -2696,115 +3581,616 @@ * GetLanConfigPages - Fetch LANConfig pages. * @ioc: Pointer to MPT_ADAPTER structure * - * Returns 0 for success, non-zero for failure. + * Return: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) */ static int GetLanConfigPages(MPT_ADAPTER *ioc) { - Config_t config_req; - ConfigReply_t config_reply; - LANPage0_t *page0; + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + LANPage0_t *ppage0_alloc; dma_addr_t page0_dma; - LANPage1_t *page1; + LANPage1_t *ppage1_alloc; dma_addr_t page1_dma; - int i; - int req_sz; - int reply_sz; + int rc = 0; int data_sz; + int copy_sz; -/* LANPage0 */ - /* Immediate destination (reply area)... */ - reply_sz = sizeof(config_reply); - memset(&config_reply, 0, reply_sz); - - /* Ultimate destination... */ - page0 = &ioc->lan_cnfg_page0; - data_sz = sizeof(*page0); - memset(page0, 0, data_sz); - - /* Request area (config_req on the stack right now!) 
*/ - req_sz = sizeof(config_req); - memset(&config_req, 0, req_sz); - config_req.Function = MPI_FUNCTION_CONFIG; - config_req.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - /* config_req.Header.PageVersion = 0; */ - /* config_req.Header.PageLength = 0; */ - config_req.Header.PageNumber = 0; - config_req.Header.PageType = MPI_CONFIG_PAGETYPE_LAN; - /* config_req.PageAddress = 0; */ - config_req.PageBufferSGE.u.Simple.FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_END_OF_LIST | - MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_32_BIT_ADDRESSING | - MPI_SGE_FLAGS_32_BIT_CONTEXT) << MPI_SGE_FLAGS_SHIFT) | - (u32)data_sz - ); - page0_dma = pci_map_single(ioc->pcidev, page0, data_sz, PCI_DMA_FROMDEVICE); - config_req.PageBufferSGE.u.Simple.u.Address32 = cpu_to_le32(page0_dma); + /* Get LAN Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength > 0) { + data_sz = hdr.PageLength * 4; + ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); + rc = -ENOMEM; + if (ppage0_alloc) { + memset((u8 *)ppage0_alloc, 0, data_sz); + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + copy_sz = MIN(sizeof(LANPage0_t), data_sz); + memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz); - dprintk((KERN_INFO MYNAM ": %s: Sending Config request LAN_PAGE_0\n", - ioc->name)); + } - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&config_req, - reply_sz, (u16*)&config_reply, 3); - pci_unmap_single(ioc->pcidev, page0_dma, data_sz, PCI_DMA_FROMDEVICE); - if (i != 0) - return i; + 
pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); - /* Now byte swap the necessary LANPage0 fields */ + /* FIXME! + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ -/* LANPage1 */ - /* Immediate destination (reply area)... */ - reply_sz = sizeof(config_reply); - memset(&config_reply, 0, reply_sz); - - /* Ultimate destination... */ - page1 = &ioc->lan_cnfg_page1; - data_sz = sizeof(*page1); - memset(page1, 0, data_sz); - - /* Request area (config_req on the stack right now!) */ - req_sz = sizeof(config_req); - memset(&config_req, 0, req_sz); - config_req.Function = MPI_FUNCTION_CONFIG; - config_req.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - /* config_req.Header.PageVersion = 0; */ - /* config_req.Header.PageLength = 0; */ - config_req.Header.PageNumber = 1; - config_req.Header.PageType = MPI_CONFIG_PAGETYPE_LAN; - /* config_req.PageAddress = 0; */ - config_req.PageBufferSGE.u.Simple.FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_END_OF_LIST | - MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_32_BIT_ADDRESSING | - MPI_SGE_FLAGS_32_BIT_CONTEXT) << MPI_SGE_FLAGS_SHIFT) | - (u32)data_sz - ); - page1_dma = pci_map_single(ioc->pcidev, page1, data_sz, PCI_DMA_FROMDEVICE); - config_req.PageBufferSGE.u.Simple.u.Address32 = cpu_to_le32(page1_dma); + } - dprintk((KERN_INFO MYNAM ": %s: Sending Config request LAN_PAGE_1\n", - ioc->name)); + if (rc) + return rc; + } - i = HandShakeReqAndReply(ioc, req_sz, (u32*)&config_req, - reply_sz, (u16*)&config_reply, 3); - pci_unmap_single(ioc->pcidev, page1_dma, data_sz, PCI_DMA_FROMDEVICE); - if (i != 0) - return i; + /* Get LAN Page 1 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 1; + hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + + if 
((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma); + if (ppage1_alloc) { + memset((u8 *)ppage1_alloc, 0, data_sz); + cfg.physAddr = page1_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + copy_sz = MIN(sizeof(LANPage1_t), data_sz); + memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz); + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma); + + /* FIXME! + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ + + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * GetFcPortPage0 - Fetch FCPort config Page0. + * @ioc: Pointer to MPT_ADAPTER structure + * @portnum: IOC Port number + * + * Return: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +static int +GetFcPortPage0(MPT_ADAPTER *ioc, int portnum) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + FCPortPage0_t *ppage0_alloc; + FCPortPage0_t *pp0dest; + dma_addr_t page0_dma; + int data_sz; + int copy_sz; + int rc; + + /* Get FCPort Page 0 header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = portnum; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma); + if (ppage0_alloc) { + memset((u8 *)ppage0_alloc, 0, 
data_sz); + cfg.physAddr = page0_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + if ((rc = mpt_config(ioc, &cfg)) == 0) { + /* save the data */ + pp0dest = &ioc->fc_port_page0[portnum]; + copy_sz = MIN(sizeof(FCPortPage0_t), data_sz); + memcpy(pp0dest, ppage0_alloc, copy_sz); + + /* + * Normalize endianness of structure data, + * by byte-swapping all > 1 byte fields! + */ + pp0dest->Flags = le32_to_cpu(pp0dest->Flags); + pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier); + pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low); + pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High); + pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low); + pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High); + pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass); + pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds); + pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed); + pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize); + pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low); + pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High); + pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low); + pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High); + pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount); + pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators); + + } + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 
+ * @ioc: Pointer to MPT_ADAPTER structure + * + * Returns: 0 for success + * -ENOMEM if no memory available + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +static int +GetIoUnitPage2(MPT_ADAPTER *ioc) +{ + ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + IOUnitPage2_t *ppage_alloc; + dma_addr_t page_dma; + int data_sz; + int rc; + + /* Get the page header */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 2; + hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.pageAddr = 0; + cfg.timeout = 0; + + if ((rc = mpt_config(ioc, &cfg)) != 0) + return rc; + + if (hdr.PageLength == 0) + return 0; + + /* Read the config page */ + data_sz = hdr.PageLength * 4; + rc = -ENOMEM; + ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma); + if (ppage_alloc) { + memset((u8 *)ppage_alloc, 0, data_sz); + cfg.physAddr = page_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + /* If Good, save data */ + if ((rc = mpt_config(ioc, &cfg)) == 0) + ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion); + + pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2 + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: -EFAULT if read of config page header fails + * or if no nvram + * If read of SCSI Port Page 0 fails, + * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) + * Adapter settings: async, narrow + * Return 1 + * If read of SCSI Port Page 2 fails, + * Adapter settings valid + * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) + * Return 1 + * Else + * Both valid + * Return 0 + * CHECK - what type of locking mechanisms should 
be used???? + */ +static int +mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum) +{ + u8 *pbuf = NULL; + dma_addr_t buf_dma; + CONFIGPARMS cfg; + ConfigPageHeader_t header; + int ii; + int data, rc = 0; + + /* Allocate memory + */ + if (!ioc->spi_data.nvram) { + int sz; + u8 *mem; + sz = MPT_MAX_SCSI_DEVICES * sizeof(int); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -EFAULT; + + ioc->spi_data.nvram = (int *) mem; + + dprintk((MYIOC_s_INFO_FMT "SCSI device NVRAM settings @ %p, sz=%d\n", + ioc->name, ioc->spi_data.nvram, sz)); + } + + /* Invalidate NVRAM information + */ + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID; + } + + /* Read SPP0 header, allocate memory, then read page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 0; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; /* use default */ + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength > 0) { + pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) != 0) { + ioc->spi_data.maxBusWidth = MPT_NARROW; + ioc->spi_data.maxSyncOffset = 0; + ioc->spi_data.minSyncFactor = MPT_ASYNC; + ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN; + rc = 1; + } else { + /* Save the Port Page 0 data + */ + SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf; + pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities); + pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface); + + ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 
1 : 0; + data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK; + if (data) { + ioc->spi_data.maxSyncOffset = (u8) (data >> 16); + data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK; + ioc->spi_data.minSyncFactor = (u8) (data >> 8); + } else { + ioc->spi_data.maxSyncOffset = 0; + ioc->spi_data.minSyncFactor = MPT_ASYNC; + } + + ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK; + + /* Update the minSyncFactor based on bus type. + */ + if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) || + (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) { + + if (ioc->spi_data.minSyncFactor < MPT_ULTRA) + ioc->spi_data.minSyncFactor = MPT_ULTRA; + } + } + if (pbuf) { + pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + } + + /* SCSI Port Page 2 - Read the header then the page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 2; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength > 0) { + /* Allocate memory and read SCSI Port Page 2 + */ + pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM; + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) != 0) { + /* Nvram data is left with INVALID mark + */ + rc = 1; + } else { + SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf; + MpiDeviceInfo_t *pdevice = NULL; - /* Now byte swap the necessary LANPage1 fields */ + /* Save the Port Page 2 data + * (reformat into a 32bit quantity) + */ + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + pdevice = &pPP2->DeviceSettings[ii]; + data = (le16_to_cpu(pdevice->DeviceFlags) << 16) | + (pdevice->SyncFactor << 8) | pdevice->Timeout; + 
ioc->spi_data.nvram[ii] = data; + } + } + + pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + + /* Update Adapter limits with those from NVRAM + * Comment: Don't need to do this. Target performance + * parameters will never exceed the adapters limits. + */ + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mpt_readScsiDevicePageHeaders - save version and length of SDP1 + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: -EFAULT if read of config page header fails + * or 0 if success. + */ +static int +mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum) +{ + CONFIGPARMS cfg; + ConfigPageHeader_t header; + + /* Read the SCSI Device Page 1 header + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 1; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = portnum; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + ioc->spi_data.sdp1version = cfg.hdr->PageVersion; + ioc->spi_data.sdp1length = cfg.hdr->PageLength; + + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 0; + header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + ioc->spi_data.sdp0version = cfg.hdr->PageVersion; + ioc->spi_data.sdp0length = cfg.hdr->PageLength; return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** + * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes + * @ioc: Pointer to a Adapter Strucutre + * @portnum: IOC port number + * + * Return: + * 0 on success + * -EFAULT if read of config page header fails or data pointer not NULL + * -ENOMEM if pci_alloc failed + */ +static int +mpt_findImVolumes(MPT_ADAPTER *ioc) +{ + IOCPage2_t *pIoc2 = NULL; + IOCPage3_t 
*pIoc3 = NULL; + ConfigPageIoc2RaidVol_t *pIocRv = NULL; + u8 *mem; + dma_addr_t ioc2_dma; + dma_addr_t ioc3_dma; + CONFIGPARMS cfg; + ConfigPageHeader_t header; + int jj; + int rc = 0; + int iocpage2sz; + int iocpage3sz = 0; + u8 nVols, nPhys; + u8 vid, vbus, vioc; + + if (ioc->spi_data.pIocPg3) + return -EFAULT; + + /* Read IOCP2 header then the page. + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 2; + header.PageType = MPI_CONFIG_PAGETYPE_IOC; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + return -EFAULT; + + if (header.PageLength == 0) + return -EFAULT; + + iocpage2sz = header.PageLength * 4; + pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma); + if (!pIoc2) + return -ENOMEM; + + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.physAddr = ioc2_dma; + if (mpt_config(ioc, &cfg) != 0) + goto done_and_free; + + /* Identify RAID Volume Id's */ + nVols = pIoc2->NumActiveVolumes; + if ( nVols == 0) { + /* No RAID Volumes. Done. + */ + } else { + /* At least 1 RAID Volume + */ + pIocRv = pIoc2->RaidVolume; + ioc->spi_data.isRaid = 0; + for (jj = 0; jj < nVols; jj++, pIocRv++) { + vid = pIocRv->VolumeID; + vbus = pIocRv->VolumeBus; + vioc = pIocRv->VolumeIOC; + + /* find the match + */ + if (vbus == 0) { + ioc->spi_data.isRaid |= (1 << vid); + } else { + /* Error! Always bus 0 + */ + } + } + } + + /* Identify Hidden Physical Disk Id's */ + nPhys = pIoc2->NumActivePhysDisks; + if (nPhys == 0) { + /* No physical disks. Done. + */ + } else { + /* There is at least one physical disk. 
+ * Read and save IOC Page 3 + */ + header.PageVersion = 0; + header.PageLength = 0; + header.PageNumber = 3; + header.PageType = MPI_CONFIG_PAGETYPE_IOC; + cfg.hdr = &header; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; + cfg.timeout = 0; + if (mpt_config(ioc, &cfg) != 0) + goto done_and_free; + + if (header.PageLength == 0) + goto done_and_free; + + /* Read Header good, alloc memory + */ + iocpage3sz = header.PageLength * 4; + pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma); + if (!pIoc3) + goto done_and_free; + + /* Read the Page and save the data + * into malloc'd memory. + */ + cfg.physAddr = ioc3_dma; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + if (mpt_config(ioc, &cfg) == 0) { + mem = kmalloc(iocpage3sz, GFP_KERNEL); + if (mem) { + memcpy(mem, (u8 *)pIoc3, iocpage3sz); + ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem; + } + } + } + +done_and_free: + if (pIoc2) { + pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma); + pIoc2 = NULL; + } + + if (pIoc3) { + pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma); + pIoc3 = NULL; + } + + return rc; +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * SendEventNotification - Send EventNotification (on or off) request * to MPT adapter. 
* @ioc: Pointer to MPT_ADAPTER structure @@ -2817,13 +4203,13 @@ evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc->id); if (evnp == NULL) { - dprintk((KERN_WARNING MYNAM ": %s: WARNING - Unable to allocate a event request frame!\n", + dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", ioc->name)); return 0; } memset(evnp, 0, sizeof(*evnp)); - dprintk((KERN_INFO MYNAM ": %s: Sending EventNotification(%d)\n", ioc->name, EvSwitch)); + dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch)); evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; evnp->ChainOffset = 0; @@ -2847,13 +4233,13 @@ EventAck_t *pAck; if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc->id)) == NULL) { - printk(KERN_WARNING MYNAM ": %s: WARNING - Unable to allocate event ACK request frame!\n", + printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n", ioc->name); return -1; } memset(pAck, 0, sizeof(*pAck)); - dprintk((KERN_INFO MYNAM ": %s: Sending EventAck\n", ioc->name)); + dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name)); pAck->Function = MPI_FUNCTION_EVENT_ACK; pAck->ChainOffset = 0; @@ -2866,25 +4252,212 @@ return 0; } -#ifdef CONFIG_PROC_FS /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mpt_config - Generic function to issue config message + * @ioc - Pointer to an adapter structure + * @cfg - Pointer to a configuration structure. Struct contains + * action, page address, direction, physical address + * and pointer to a configuration page header + * Page header is updated. 
+ * + * Returns 0 for success + * -EPERM if not allowed due to ISR context + * -EAGAIN if no msg frames currently available + * -EFAULT for non-successful reply or no reply (timeout) + */ +int +mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) +{ + Config_t *pReq; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + unsigned long flags; + int ii, rc; + int flagsLength; + int in_isr; + + /* (Bugzilla:fibrebugs, #513) + * Bug fix (part 1)! 20010905 -sralston + * Prevent calling wait_event() (below), if caller happens + * to be in ISR context, because that is fatal! + */ + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", + ioc->name)); + return -EPERM; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(mpt_base_index, ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", + ioc->name)); + return -EAGAIN; + } + pReq = (Config_t *)mf; + pReq->Action = pCfg->action; + pReq->Reserved = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_CONFIG; + pReq->Reserved1[0] = 0; + pReq->Reserved1[1] = 0; + pReq->Reserved1[2] = 0; + pReq->MsgFlags = 0; + for (ii=0; ii < 8; ii++) + pReq->Reserved2[ii] = 0; + + pReq->Header.PageVersion = pCfg->hdr->PageVersion; + pReq->Header.PageLength = pCfg->hdr->PageLength; + pReq->Header.PageNumber = pCfg->hdr->PageNumber; + pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); + pReq->PageAddress = cpu_to_le32(pCfg->pageAddr); + + /* Add a SGE to the config request. 
+ */ + flagsLength = ((MPI_SGE_FLAGS_LAST_ELEMENT | + MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPT_SGE_ADDRESS_SIZE ) << MPI_SGE_FLAGS_SHIFT) | + pCfg->hdr->PageLength * 4; + + if (pCfg->dir) + flagsLength |= (MPI_SGE_FLAGS_DIRECTION << MPI_SGE_FLAGS_SHIFT); + + psge = (MptSge_t *) &pReq->PageBufferSGE; + psge->FlagsLength = cpu_to_le32(flagsLength); + cpu_to_leXX(pCfg->physAddr, psge->Address); + + dprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n", + ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action)); + + /* Append pCfg pointer to end of mf + */ + *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; + + /* Initalize the timer + */ + init_timer(&pCfg->timer); + pCfg->timer.data = (unsigned long) ioc; + pCfg->timer.function = mpt_timer_expired; + pCfg->wait_done = 0; + + /* Set the timer; ensure 10 second minimum */ + if (pCfg->timeout < 10) + pCfg->timer.expires = jiffies + HZ*10; + else + pCfg->timer.expires = jiffies + HZ*pCfg->timeout; + + /* Add to end of Q, set timer and then issue this command */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + Q_ADD_TAIL(&ioc->configQ.head, &pCfg->linkage, Q_ITEM); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + add_timer(&pCfg->timer); + mpt_put_msg_frame(mpt_base_index, ioc->id, mf); + wait_event(mpt_waitq, pCfg->wait_done); + + /* mf has been freed - do not access */ + + rc = pCfg->status; + + return rc; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... + * mpt_timer_expired - Call back for timer process. + * Used only internal config functionality. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + */ +static void +mpt_timer_expired(unsigned long data) +{ + MPT_ADAPTER *ioc = (MPT_ADAPTER *) data; + + dprintk((MYIOC_s_WARN_FMT "mpt_timer_expired! 
\n", ioc->name)); + + /* Perform a FW reload */ + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); + + /* No more processing. + * Hard reset clean-up will wake up + * process and free all resources. + */ + dprintk((MYIOC_s_WARN_FMT "mpt_timer_expired complete!\n", ioc->name)); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mpt_ioc_reset - Base cleanup for hard reset + * @ioc: Pointer to the adapter structure + * @reset_phase: Indicates pre- or post-reset functionality + * + * Remark: Free's resources with internally generated commands. */ +static int +mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + CONFIGPARMS *pCfg; + unsigned long flags; -#define PROC_MPT_READ_RETURN(page,start,off,count,eof,len) \ -{ \ - len -= off; \ - if (len < count) { \ - *eof = 1; \ - if (len <= 0) \ - return 0; \ - } else \ - len = count; \ - *start = page + off; \ - return len; \ + dprintk((KERN_WARNING MYNAM + ": IOC %s_reset routed to MPT base driver!\n", + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + + if (reset_phase == MPT_IOC_PRE_RESET) { + /* If the internal config Q is not empty - + * delete timer. MF resources will be freed when + * the FIFO's are primed. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&ioc->configQ)){ + pCfg = (CONFIGPARMS *)ioc->configQ.head; + do { + del_timer(&pCfg->timer); + pCfg = (CONFIGPARMS *) (pCfg->linkage.forw); + } while (pCfg != (CONFIGPARMS *)&ioc->configQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + } else { + CONFIGPARMS *pNext; + + /* Search the configQ for internal commands. + * Flush the Q, and wake up all suspended threads. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! 
Q_IS_EMPTY(&ioc->configQ)){ + pCfg = (CONFIGPARMS *)ioc->configQ.head; + do { + pNext = (CONFIGPARMS *) pCfg->linkage.forw; + + Q_DEL_ITEM(&pCfg->linkage); + + pCfg->status = MPT_CONFIG_ERROR; + pCfg->wait_done = 1; + wake_up(&mpt_waitq); + + pCfg = pNext; + } while (pCfg != (CONFIGPARMS *)&ioc->configQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + } + + return 1; /* currently means nothing really */ } + +#ifdef CONFIG_PROC_FS /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff... + */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries. @@ -2894,71 +4467,62 @@ static int procmpt_create(void) { - MPT_ADAPTER *ioc; - struct proc_dir_entry *ent; - int errcnt = 0; + MPT_ADAPTER *ioc; + struct proc_dir_entry *ent; + int ii; /* - * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" - * (single level) to multi level (e.g. "driver/message/fusion") - * something here needs to change. -sralston + * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" + * (single level) to multi level (e.g. "driver/message/fusion") + * something here needs to change. 
-sralston */ - procmpt_root_dir = CREATE_PROCDIR_ENTRY(MPT_PROCFS_MPTBASEDIR, NULL); - if (procmpt_root_dir == NULL) + mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL); + if (mpt_proc_root_dir == NULL) return -ENOTDIR; - if ((ioc = mpt_adapter_find_first()) != NULL) { - ent = create_proc_read_entry(MPT_PROCFS_SUMMARY_NODE, 0, NULL, procmpt_read_summary, NULL); - if (ent == NULL) { - printk(KERN_WARNING MYNAM ": WARNING - Could not create %s entry!\n", - MPT_PROCFS_SUMMARY_PATHNAME); - errcnt++; + for (ii=0; ii < MPT_PROC_ENTRIES; ii++) { + ent = create_proc_entry(mpt_proc_list[ii].name, + S_IFREG|S_IRUGO, mpt_proc_root_dir); + if (!ent) { + printk(KERN_WARNING MYNAM + ": WARNING - Could not create /proc/mpt/%s entry\n", + mpt_proc_list[ii].name); + continue; } + ent->read_proc = mpt_proc_list[ii].f; + ent->data = NULL; } + ioc = mpt_adapter_find_first(); while (ioc != NULL) { - char pname[32]; - int namelen; + struct proc_dir_entry *dent; /* * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter. */ - namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - if ((ent = CREATE_PROCDIR_ENTRY(pname, NULL)) != NULL) { + if ((dent = proc_mkdir(ioc->name, mpt_proc_root_dir)) != NULL) { /* - * And populate it with: "summary" and "dbg" file entries. + * And populate it with mpt_ioc_proc_list[] entries. */ - (void) sprintf(pname+namelen, "/summary"); - ent = create_proc_read_entry(pname, 0, NULL, procmpt_read_summary, ioc); - if (ent == NULL) { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); - } -//#ifdef MPT_DEBUG - /* DEBUG aid! 
*/ - (void) sprintf(pname+namelen, "/dbg"); - ent = create_proc_read_entry(pname, 0, NULL, procmpt_read_dbg, ioc); - if (ent == NULL) { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); + for (ii=0; ii < MPT_IOC_PROC_ENTRIES; ii++) { + ent = create_proc_entry(mpt_ioc_proc_list[ii].name, + S_IFREG|S_IRUGO, dent); + if (!ent) { + printk(KERN_WARNING MYNAM + ": WARNING - Could not create /proc/mpt/%s/%s entry!\n", + ioc->name, + mpt_ioc_proc_list[ii].name); + continue; + } + ent->read_proc = mpt_ioc_proc_list[ii].f; + ent->data = ioc; } -//#endif } else { - errcnt++; - printk(KERN_WARNING MYNAM ": %s: WARNING - Could not create /proc/%s entry!\n", - ioc->name, pname); - + printk(MYIOC_s_WARN_FMT "Could not create /proc/mpt/%s subdir entry!\n", + ioc->name, mpt_ioc_proc_list[ii].name); } - ioc = mpt_adapter_find_next(ioc); } - if (errcnt) { -// remove_proc_entry("mpt", 0); - return -ENOTDIR; - } - return 0; } @@ -2971,44 +4535,44 @@ static int procmpt_destroy(void) { - MPT_ADAPTER *ioc; + MPT_ADAPTER *ioc; + int ii; - if (!procmpt_root_dir) + if (!mpt_proc_root_dir) return 0; /* - * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" - * (single level) to multi level (e.g. "driver/message/fusion") - * something here needs to change. -sralston + * BEWARE: If/when MPT_PROCFS_MPTBASEDIR changes from "mpt" + * (single level) to multi level (e.g. "driver/message/fusion") + * something here needs to change. -sralston */ ioc = mpt_adapter_find_first(); - if (ioc != NULL) { - remove_proc_entry(MPT_PROCFS_SUMMARY_NODE, 0); - } - while (ioc != NULL) { char pname[32]; int namelen; + + namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); + /* * Tear down each "/proc/mpt/iocN" subdirectory. 
*/ - namelen = sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - (void) sprintf(pname+namelen, "/summary"); - remove_proc_entry(pname, 0); -//#ifdef MPT_DEBUG - (void) sprintf(pname+namelen, "/dbg"); - remove_proc_entry(pname, 0); -//#endif - (void) sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); - remove_proc_entry(pname, 0); + for (ii=0; ii < MPT_IOC_PROC_ENTRIES; ii++) { + (void) sprintf(pname+namelen, "/%s", mpt_ioc_proc_list[ii].name); + remove_proc_entry(pname, NULL); + } + + remove_proc_entry(ioc->name, mpt_proc_root_dir); ioc = mpt_adapter_find_next(ioc); } - if (atomic_read((atomic_t *)&procmpt_root_dir->count) == 0) { - remove_proc_entry(MPT_PROCFS_MPTBASEDIR, 0); - procmpt_root_dir = NULL; + for (ii=0; ii < MPT_PROC_ENTRIES; ii++) + remove_proc_entry(mpt_proc_list[ii].name, mpt_proc_root_dir); + + if (atomic_read((atomic_t *)&mpt_proc_root_dir->count) == 0) { + remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL); + mpt_proc_root_dir = NULL; return 0; } @@ -3016,23 +4580,23 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * procmpt_read_summary - Handle read request from /proc/mpt/summary +/* + * procmpt_summary_read - Handle read request from /proc/mpt/summary * or from /proc/mpt/iocN/summary. - * @page: Pointer to area to write information + * @buf: Pointer to area to write information * @start: Pointer to start pointer - * @off: Offset to start writing - * @count: + * @offset: Offset to start writing + * @request: * @eof: Pointer to EOF integer - * @data: Pointer + * @data: Pointer * - * Returns numbers of characters written to process performing the read. + * Returns number of characters written to process performing the read. 
*/ static int -procmpt_read_summary(char *page, char **start, off_t off, int count, int *eof, void *data) +procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) { MPT_ADAPTER *ioc; - char *out = page; + char *out = buf; int len; if (data == NULL) @@ -3040,84 +4604,196 @@ else ioc = data; -// Too verbose! -// out += sprintf(out, "Attached Fusion MPT I/O Controllers:%s\n", ioc ? "" : " none"); - while (ioc) { int more = 0; -// Too verbose! -// mpt_print_ioc_facts(ioc, out, &more, 0); mpt_print_ioc_summary(ioc, out, &more, 0, 1); out += more; - if ((out-page) >= count) { + if ((out-buf) >= request) { break; } if (data == NULL) ioc = mpt_adapter_find_next(ioc); else - ioc = NULL; /* force exit for iocN */ + ioc = NULL; /* force exit for iocN */ } - len = out - page; + len = out - buf; - PROC_MPT_READ_RETURN(page,start,off,count,eof,len); + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); } -// debug aid! /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * procmpt_read_dbg - Handle read request from /proc/mpt/iocN/dbg. - * @page: Pointer to area to write information +/* + * procmpt_version_read - Handle read request from /proc/mpt/version. + * @buf: Pointer to area to write information * @start: Pointer to start pointer - * @off: Offset to start writing - * @count: + * @offset: Offset to start writing + * @request: * @eof: Pointer to EOF integer - * @data: Pointer + * @data: Pointer * - * Returns numbers of characters written to process performing the read. + * Returns number of characters written to process performing the read. 
*/ static int -procmpt_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data) +procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - MPT_ADAPTER *ioc; - char *out = page; - int len; + int ii; + int scsi, lan, ctl, targ, dmp; + char *drvname; + int len; + + len = sprintf(buf, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON); + len += sprintf(buf+len, " Fusion MPT base driver\n"); + + scsi = lan = ctl = targ = dmp = 0; + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + drvname = NULL; + if (MptCallbacks[ii]) { + switch (MptDriverClass[ii]) { + case MPTSCSIH_DRIVER: + if (!scsi++) drvname = "SCSI host"; + break; + case MPTLAN_DRIVER: + if (!lan++) drvname = "LAN"; + break; + case MPTSTM_DRIVER: + if (!targ++) drvname = "SCSI target"; + break; + case MPTCTL_DRIVER: + if (!ctl++) drvname = "ioctl"; + break; + case MPTDMP_DRIVER: + if (!dmp++) drvname = "DMP"; + break; + } - ioc = data; + if (drvname) + len += sprintf(buf+len, " Fusion MPT %s driver\n", drvname); + /* + * Handle isense special case, because it + * doesn't do a formal mpt_register call. + */ + if (isense_idx == ii) + len += sprintf(buf+len, " Fusion MPT isense driver\n"); + } else + break; + } - while (ioc) { - int more = 0; + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); +} - mpt_print_ioc_facts(ioc, out, &more, 0); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info. + * @buf: Pointer to area to write information + * @start: Pointer to start pointer + * @offset: Offset to start writing + * @request: + * @eof: Pointer to EOF integer + * @data: Pointer + * + * Returns number of characters written to process performing the read. 
+ */ +static int +procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eof, void *data) +{ + MPT_ADAPTER *ioc = data; + int len; + char expVer[32]; + int sz; + int p; - out += more; - if ((out-page) >= count) { - break; + mpt_get_fw_exp_ver(expVer, ioc); + + len = sprintf(buf, "%s:", ioc->name); + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) + len += sprintf(buf+len, " (f/w download boot flag set)"); +// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL) +// len += sprintf(buf+len, " CONFIG_CHECKSUM_FAIL!"); + + len += sprintf(buf+len, "\n ProductID = 0x%04x (%s)\n", + ioc->facts.ProductID, + ioc->prod_name); + len += sprintf(buf+len, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer); + if (ioc->facts.FWImageSize) + len += sprintf(buf+len, " (fw_size=%d)", ioc->facts.FWImageSize); + len += sprintf(buf+len, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion); + len += sprintf(buf+len, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit); + len += sprintf(buf+len, " EventState = 0x%02x\n", ioc->facts.EventState); + + len += sprintf(buf+len, " CurrentHostMfaHighAddr = 0x%08x\n", + ioc->facts.CurrentHostMfaHighAddr); + len += sprintf(buf+len, " CurrentSenseBufferHighAddr = 0x%08x\n", + ioc->facts.CurrentSenseBufferHighAddr); + + len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + ioc->req_alloc, (void *)(ulong)ioc->req_alloc_dma); + /* + * Rounding UP to nearest 4-kB boundary here... 
+ */ + sz = (ioc->req_sz * ioc->req_depth) + 128; + sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000; + len += sprintf(buf+len, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n", + ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz); + len += sprintf(buf+len, " {MaxReqSz=%d} {MaxReqDepth=%d}\n", + 4*ioc->facts.RequestFrameSize, + ioc->facts.GlobalCredits); + + len += sprintf(buf+len, " ReplyFrames @ 0x%p (Dma @ 0x%p)\n", + ioc->reply_alloc, (void *)(ulong)ioc->reply_alloc_dma); + sz = (ioc->reply_sz * ioc->reply_depth) + 128; + len += sprintf(buf+len, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", + ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); + len += sprintf(buf+len, " {MaxRepSz=%d} {MaxRepDepth=%d}\n", + ioc->facts.CurReplyFrameSize, + ioc->facts.ReplyQueueDepth); + + len += sprintf(buf+len, " MaxDevices = %d\n", + (ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices); + len += sprintf(buf+len, " MaxBuses = %d\n", ioc->facts.MaxBuses); + + /* per-port info */ + for (p=0; p < ioc->facts.NumberOfPorts; p++) { + len += sprintf(buf+len, " PortNumber = %d (of %d)\n", + p+1, + ioc->facts.NumberOfPorts); + if ((int)ioc->chip_type <= (int)FC929) { + if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { + u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; + len += sprintf(buf+len, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + a[5], a[4], a[3], a[2], a[1], a[0]); + } + len += sprintf(buf+len, " WWN = %08X%08X:%08X%08X\n", + ioc->fc_port_page0[p].WWNN.High, + ioc->fc_port_page0[p].WWNN.Low, + ioc->fc_port_page0[p].WWPN.High, + ioc->fc_port_page0[p].WWPN.Low); } - ioc = NULL; } - len = out - page; - PROC_MPT_READ_RETURN(page,start,off,count,eof,len); + MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len); } + #endif /* CONFIG_PROC_FS } */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc) { - if 
((ioc->facts.FWVersion & 0xF000) == 0xE000) + buf[0] ='\0'; + if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) { sprintf(buf, " (Exp %02d%02d)", - (ioc->facts.FWVersion & 0x0F00) >> 8, /* Month */ - ioc->facts.FWVersion & 0x001F); /* Day */ - else - buf[0] ='\0'; + (ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */ + (ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */ - /* insider hack! */ - if (ioc->facts.FWVersion & 0x0080) { - strcat(buf, " [MDBG]"); + /* insider hack! */ + if ((ioc->facts.FWVersion.Word >> 8) & 0x80) + strcat(buf, " [MDBG]"); } } @@ -3130,8 +4806,8 @@ * @len: Offset at which to start writing in buffer * @showlan: Display LAN stuff? * - * This routine writes (english readable) ASCII text, which represents - * a summary of IOC information, to a buffer. + * This routine writes (english readable) ASCII text, which represents + * a summary of IOC information, to a buffer. */ void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan) @@ -3144,11 +4820,11 @@ /* * Shorter summary of attached ioc's... 
*/ - y = sprintf(buffer+len, "%s: %s, %s%04xh%s, Ports=%d, MaxQ=%d", + y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d", ioc->name, ioc->prod_name, MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */ - ioc->facts.FWVersion, + ioc->facts.FWVersion.Word, expVer, ioc->facts.NumberOfPorts, ioc->req_depth); @@ -3159,8 +4835,11 @@ a[5], a[4], a[3], a[2], a[1], a[0]); } - if (ioc->pci_irq < 100) - y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq); +#ifndef __sparc__ + y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq); +#else + y += sprintf(buffer+len+y, ", IRQ=%s", __irq_itoa(ioc->pci_irq)); +#endif if (!ioc->active) y += sprintf(buffer+len+y, " (disabled)"); @@ -3171,75 +4850,66 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Reset Handling + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_print_ioc_facts - Write ASCII summary of IOC facts to a buffer. + * mpt_HardResetHandler - Generic reset handler, issue SCSI Task + * Management call based on input arg values. If TaskMgmt fails, + * return associated SCSI request. * @ioc: Pointer to MPT_ADAPTER structure - * @buffer: Pointer to buffer where IOC facts should be written - * @size: Pointer to number of bytes we wrote (set by this routine) - * @len: Offset at which to start writing in buffer + * @sleepFlag: Indicates if sleep or schedule must be called. * - * This routine writes (english readable) ASCII text, which represents - * a summary of the IOC facts, to a buffer. + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). + * + * Remark: A return of -1 is a FATAL error case, as it means a + * FW reload/initialization failed. + * + * Returns 0 for SUCCESS or -1 if FAILED. 
*/ -void -mpt_print_ioc_facts(MPT_ADAPTER *ioc, char *buffer, int *size, int len) +int +mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { - char expVer[32]; - char iocName[16]; - int sz; - int y; - int p; - - mpt_get_fw_exp_ver(expVer, ioc); + int rc; + unsigned long flags; - strcpy(iocName, ioc->name); - y = sprintf(buffer+len, "%s:\n", iocName); + dprintk((MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name)); +#ifdef MFCNT + printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name); + printk("MF count 0x%x !\n", ioc->mfcnt); +#endif - y += sprintf(buffer+len+y, " ProductID = 0x%04x\n", ioc->facts.ProductID); - for (p=0; p < ioc->facts.NumberOfPorts; p++) { - y += sprintf(buffer+len+y, " PortNumber = %d (of %d)\n", - p+1, - ioc->facts.NumberOfPorts); - if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) { - u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; - y += sprintf(buffer+len+y, " LanAddr = 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - a[5], a[4], a[3], a[2], a[1], a[0]); - } - } - y += sprintf(buffer+len+y, " FWVersion = 0x%04x%s\n", ioc->facts.FWVersion, expVer); - y += sprintf(buffer+len+y, " MsgVersion = 0x%04x\n", ioc->facts.MsgVersion); - y += sprintf(buffer+len+y, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit); - y += sprintf(buffer+len+y, " EventState = 0x%02x\n", ioc->facts.EventState); - y += sprintf(buffer+len+y, " CurrentHostMfaHighAddr = 0x%08x\n", - ioc->facts.CurrentHostMfaHighAddr); - y += sprintf(buffer+len+y, " CurrentSenseBufferHighAddr = 0x%08x\n", - ioc->facts.CurrentSenseBufferHighAddr); - y += sprintf(buffer+len+y, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); - y += sprintf(buffer+len+y, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + /* Reset the adapter. Prevent more than 1 call to + * mpt_do_ioc_recovery at any instant in time. 
+ */ + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ + spin_unlock_irqrestore(&ioc->diagLock, flags); + return 0; + } else { + ioc->diagPending = 1; + } + spin_unlock_irqrestore(&ioc->diagLock, flags); - y += sprintf(buffer+len+y, " RequestFrames @ 0x%p (Dma @ 0x%08x)\n", - ioc->req_alloc, ioc->req_alloc_dma); - /* - * Rounding UP to nearest 4-kB boundary here... + /* FIXME: If do_ioc_recovery fails, repeat.... */ - sz = (ioc->req_sz * ioc->req_depth) + 128; - sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000; - y += sprintf(buffer+len+y, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n", - ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz); - y += sprintf(buffer+len+y, " {MaxReqSz=%d} {MaxReqDepth=%d}\n", - 4*ioc->facts.RequestFrameSize, - ioc->facts.GlobalCredits); - y += sprintf(buffer+len+y, " ReplyFrames @ 0x%p (Dma @ 0x%08x)\n", - ioc->reply_alloc, ioc->reply_alloc_dma); - sz = (ioc->reply_sz * ioc->reply_depth) + 128; - y += sprintf(buffer+len+y, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n", - ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz); - y += sprintf(buffer+len+y, " {MaxRepSz=%d} {MaxRepDepth=%d}\n", - ioc->facts.CurReplyFrameSize, - ioc->facts.ReplyQueueDepth); + if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { + printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n", + rc, ioc->name); + } - *size = y; + spin_lock_irqsave(&ioc->diagLock, flags); + ioc->diagPending = 0; + if (ioc->alt_ioc) + ioc->alt_ioc->diagPending = 0; + spin_unlock_irqrestore(&ioc->diagLock, flags); + + dprintk((MYIOC_s_INFO_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); + + return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -3268,7 +4938,7 @@ ds = "External Bus Reset"; break; case MPI_EVENT_RESCAN: - ds = "Bus Rescan Event"; + ds = "Bus Rescan Event"; /* Ok, do we need to do 
anything here? As far as I can tell, this is when a new device gets added to the loop. */ @@ -3296,6 +4966,9 @@ else ds = "Events(OFF) Change"; break; + case MPI_EVENT_INTEGRATED_RAID: + ds = "Integrated Raid"; + break; /* * MPT base "custom" events may be added here... */ @@ -3307,7 +4980,7 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * ProcessEventNotification - Route a received EventNotificationReply to * all currently regeistered event handlers. * @ioc: Pointer to MPT_ADAPTER structure @@ -3322,7 +4995,7 @@ u16 evDataLen; u32 evData0 = 0; // u32 evCtx; - int i; + int ii; int r = 0; int handlers = 0; char *evStr; @@ -3339,15 +5012,15 @@ } evStr = EventDescriptionStr(event, evData0); - dprintk((KERN_INFO MYNAM ": %s: MPT event (%s=%02Xh) detected!\n", + dprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n", ioc->name, evStr, event)); #if defined(MPT_DEBUG) || defined(MPT_DEBUG_EVENTS) printk(KERN_INFO MYNAM ": Event data:\n" KERN_INFO); - for (i = 0; i < evDataLen; i++) - printk(" %08x", le32_to_cpu(pEventReply->Data[i])); + for (ii = 0; ii < evDataLen; ii++) + printk(" %08x", le32_to_cpu(pEventReply->Data[ii])); printk("\n"); #endif @@ -3365,6 +5038,8 @@ case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */ case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */ case MPI_EVENT_LOGOUT: /* 09 */ + case MPI_EVENT_INTEGRATED_RAID: /* 0B */ + case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */ default: break; case MPI_EVENT_EVENT_CHANGE: /* 0A */ @@ -3382,13 +5057,36 @@ } /* + * Should this event be logged? Events are written sequentially. + * When buffer is full, start again at the top. 
+ */ + if (ioc->events && (ioc->eventTypes & ( 1 << event))) { + int idx; + + idx = ioc->eventContext % ioc->eventLogSize; + + ioc->events[idx].event = event; + ioc->events[idx].eventContext = ioc->eventContext; + + for (ii = 0; ii < 2; ii++) { + if (ii < evDataLen) + ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]); + else + ioc->events[idx].data[ii] = 0; + } + + ioc->eventContext++; + } + + + /* * Call each currently registered protocol event handler. */ - for (i=MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) { - if (MptEvHandlers[i]) { - dprintk((KERN_INFO MYNAM ": %s: Routing Event to event handler #%d\n", - ioc->name, i)); - r += (*(MptEvHandlers[i]))(ioc, pEventReply); + for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) { + if (MptEvHandlers[ii]) { + dprintk((MYIOC_s_INFO_FMT "Routing Event to event handler #%d\n", + ioc->name, ii)); + r += (*(MptEvHandlers[ii]))(ioc, pEventReply); handlers++; } } @@ -3398,7 +5096,9 @@ * If needed, send (a single) EventAck. */ if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) { - if ((i = SendEventAck(ioc, pEventReply)) != 0) { + if ((ii = SendEventAck(ioc, pEventReply)) != 0) { + printk(MYIOC_s_WARN_FMT "SendEventAck returned %d\n", + ioc->name, ii); } } @@ -3427,7 +5127,7 @@ switch(log_info) { /* FCP Initiator */ - case MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME: + case MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME: desc = "Received an out of order frame - unsupported"; break; case MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME: @@ -3483,7 +5183,7 @@ desc = "Not sent because login to remote node not validated"; break; case MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND: - desc = "Cleared from the outbound after a logout"; + desc = "Cleared from the outbound queue after a logout"; break; case MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN: desc = "Cleared waiting for data after a logout"; @@ -3516,7 +5216,7 @@ break; } - printk(KERN_INFO MYNAM ": %s: LogInfo(0x%08x): SubCl={%s}", + printk(MYIOC_s_INFO_FMT 
"LogInfo(0x%08x): SubCl={%s}", ioc->name, log_info, subcl_str[subcl]); if (SubCl == MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET) printk(", byte_offset=%d\n", log_info & MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET); @@ -3539,7 +5239,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info) { /* FIXME! */ - printk(KERN_INFO MYNAM ": %s: LogInfo(0x%08x)\n", ioc->name, log_info); + printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x)\n", ioc->name, log_info); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -3553,7 +5253,7 @@ * Specialized driver registration routine for the isense driver. */ int -mpt_register_ascqops_strings(/*ASCQ_Table_t*/void *ascqTable, int ascqtbl_sz, const char **opsTable) +mpt_register_ascqops_strings(void *ascqTable, int ascqtbl_sz, const char **opsTable) { int r = 0; @@ -3562,6 +5262,7 @@ mpt_ASCQ_TableSz = ascqtbl_sz; mpt_ScsiOpcodesPtr = opsTable; printk(KERN_INFO MYNAM ": English readable SCSI-3 strings enabled:-)\n"); + isense_idx = last_drv_idx; r = 1; } MOD_INC_USE_COUNT; @@ -3582,11 +5283,15 @@ mpt_ASCQ_TableSz = 0; mpt_ScsiOpcodesPtr = NULL; printk(KERN_INFO MYNAM ": English readable SCSI-3 strings disabled)-:\n"); + isense_idx = -1; MOD_DEC_USE_COUNT; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +EXPORT_SYMBOL(mpt_adapters); +EXPORT_SYMBOL(mpt_proc_root_dir); +EXPORT_SYMBOL(DmpService); EXPORT_SYMBOL(mpt_register); EXPORT_SYMBOL(mpt_deregister); EXPORT_SYMBOL(mpt_event_register); @@ -3597,12 +5302,16 @@ EXPORT_SYMBOL(mpt_put_msg_frame); EXPORT_SYMBOL(mpt_free_msg_frame); EXPORT_SYMBOL(mpt_send_handshake_request); +EXPORT_SYMBOL(mpt_handshake_req_reply_wait); EXPORT_SYMBOL(mpt_adapter_find_first); EXPORT_SYMBOL(mpt_adapter_find_next); EXPORT_SYMBOL(mpt_verify_adapter); +EXPORT_SYMBOL(mpt_GetIocState); EXPORT_SYMBOL(mpt_print_ioc_summary); EXPORT_SYMBOL(mpt_lan_index); EXPORT_SYMBOL(mpt_stm_index); +EXPORT_SYMBOL(mpt_HardResetHandler); +EXPORT_SYMBOL(mpt_config); 
EXPORT_SYMBOL(mpt_register_ascqops_strings); EXPORT_SYMBOL(mpt_deregister_ascqops_strings); @@ -3611,12 +5320,13 @@ EXPORT_SYMBOL(mpt_ScsiOpcodesPtr); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_init - Fusion MPT base driver initialization routine. * * Returns 0 for success, non-zero for failure. */ -int __init fusion_init(void) +int __init +fusion_init(void) { int i; @@ -3636,12 +5346,22 @@ MptResetHandlers[i] = NULL; } + DmpService = NULL; + /* NEW! 20010120 -sralston * Register ourselves (mptbase) in order to facilitate * EventNotification handling. */ mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); + /* Register for hard reset handling callbacks. + */ + if (mpt_reset_register(mpt_base_index, mpt_ioc_reset) == 0) { + dprintk((KERN_INFO MYNAM ": Register for IOC reset notification\n")); + } else { + /* FIXME! */ + } + if ((i = mpt_pci_scan()) < 0) return i; @@ -3649,13 +5369,14 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** +/* * fusion_exit - Perform driver unload cleanup. * * This routine frees all resources associated with each MPT adapter * and removes all %MPT_PROCFS_MPTBASEDIR entries. */ -static void fusion_exit(void) +static void +fusion_exit(void) { MPT_ADAPTER *this; @@ -3665,7 +5386,7 @@ * Moved this *above* removal of all MptAdapters! */ #ifdef CONFIG_PROC_FS - procmpt_destroy(); + (void) procmpt_destroy(); #endif while (! 
Q_IS_EMPTY(&MptAdapters)) { @@ -3673,6 +5394,8 @@ Q_DEL_ITEM(this); mpt_adapter_dispose(this); } + + mpt_reset_deregister(mpt_base_index); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h --- a/drivers/message/fusion/mptbase.h Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/mptbase.h Thu May 30 21:28:58 2002 @@ -8,11 +8,12 @@ * Credits: * (see mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptbase.h,v 1.46.2.2.2.2 2001/09/18 03:22:29 sralston Exp $ + * $Id: mptbase.h,v 1.103 2002/02/27 20:24:38 pdelaney Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -55,6 +56,7 @@ /*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #include "linux_compat.h" /* linux-2.2.x (vs. 
-2.4.x) tweaks */ +#include "scsi3.h" /* SCSI defines */ #include "lsi/mpi_type.h" #include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */ @@ -62,6 +64,7 @@ #include "lsi/mpi_cnfg.h" /* IOC configuration support */ #include "lsi/mpi_init.h" /* SCSI Host (initiator) protocol support */ #include "lsi/mpi_lan.h" /* LAN over FC protocol support */ +#include "lsi/mpi_raid.h" /* Integrated Mirroring support */ #include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */ #include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */ @@ -74,11 +77,11 @@ #endif #ifndef COPYRIGHT -#define COPYRIGHT "Copyright (c) 1999-2001 " MODULEAUTHOR +#define COPYRIGHT "Copyright (c) 1999-2002 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "1.02.02" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-1.02.02" +#define MPT_LINUX_VERSION_COMMON "2.00.11" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-2.00.11" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -89,39 +92,77 @@ * Fusion MPT(linux) driver configurable stuff... */ #define MPT_MAX_ADAPTERS 16 -#define MPT_MAX_PROTOCOL_DRIVERS 8 +#define MPT_MAX_PROTOCOL_DRIVERS 16 +#define MPT_MAX_BUS 1 #define MPT_MAX_FC_DEVICES 255 +#define MPT_MAX_SCSI_DEVICES 16 +#define MPT_LAST_LUN 31 +#define MPT_SENSE_BUFFER_ALLOC 64 + /* allow for 256 max sense alloc, but only 255 max request */ +#if MPT_SENSE_BUFFER_ALLOC >= 256 +# undef MPT_SENSE_BUFFER_ALLOC +# define MPT_SENSE_BUFFER_ALLOC 256 +# define MPT_SENSE_BUFFER_SIZE 255 +#else +# define MPT_SENSE_BUFFER_SIZE MPT_SENSE_BUFFER_ALLOC +#endif -#define MPT_MISCDEV_BASENAME "mptctl" -#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME +#define MPT_NAME_LENGTH 32 #define MPT_PROCFS_MPTBASEDIR "mpt" /* chg it to "driver/fusion" ? 
*/ -#define MPT_PROCFS_SUMMARY_NODE MPT_PROCFS_MPTBASEDIR "/summary" -#define MPT_PROCFS_SUMMARY_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_NODE -#define MPT_FW_REV_MAGIC_ID_STRING "FwRev=" +#define MPT_PROCFS_SUMMARY_ALL_NODE MPT_PROCFS_MPTBASEDIR "/summary" +#define MPT_PROCFS_SUMMARY_ALL_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_ALL_NODE +#define MPT_FW_REV_MAGIC_ID_STRING "FwRev=" -#ifdef __KERNEL__ /* { */ #define MPT_MAX_REQ_DEPTH 1023 -#define MPT_REQ_DEPTH 256 +#define MPT_DEFAULT_REQ_DEPTH 256 #define MPT_MIN_REQ_DEPTH 128 #define MPT_MAX_REPLY_DEPTH MPT_MAX_REQ_DEPTH -#define MPT_REPLY_DEPTH 128 +#define MPT_DEFAULT_REPLY_DEPTH 128 #define MPT_MIN_REPLY_DEPTH 8 #define MPT_MAX_REPLIES_PER_ISR 32 #define MPT_MAX_FRAME_SIZE 128 -#define MPT_REQ_SIZE 128 -#define MPT_REPLY_SIZE 128 +#define MPT_DEFAULT_FRAME_SIZE 128 -#define MPT_SG_BUCKETS_PER_HUNK 1 +#define MPT_SG_REQ_128_SCALE 1 +#define MPT_SG_REQ_96_SCALE 2 +#define MPT_SG_REQ_64_SCALE 4 -#ifdef MODULE -#define MPT_REQ_DEPTH_RANGE_STR __MODULE_STRING(MPT_MIN_REQ_DEPTH) "-" __MODULE_STRING(MPT_MAX_REQ_DEPTH) -#define MPT_REPLY_DEPTH_RANGE_STR __MODULE_STRING(MPT_MIN_REPLY_DEPTH) "-" __MODULE_STRING(MPT_MAX_REPLY_DEPTH) -#define MPT_REPLY_SIZE_RANGE_STR __MODULE_STRING(MPT_MIN_REPLY_SIZE) "-" __MODULE_STRING(MPT_MAX_FRAME_SIZE) -#endif +#define CAN_SLEEP 1 +#define NO_SLEEP 0 + +/* + * SCSI transfer rate defines. + */ +#define MPT_ULTRA320 0x08 +#define MPT_ULTRA160 0x09 +#define MPT_ULTRA2 0x0A +#define MPT_ULTRA 0x0C +#define MPT_FAST 0x19 +#define MPT_SCSI 0x32 +#define MPT_ASYNC 0xFF + +#define MPT_NARROW 0 +#define MPT_WIDE 1 + +#ifdef __KERNEL__ /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Attempt semi-consistent error & warning msgs across + * MPT drivers. NOTE: Users of these macro defs must + * themselves define their own MYNAM. 
+ */ +#define MYIOC_s_INFO_FMT KERN_INFO MYNAM ": %s: " +#define MYIOC_s_NOTE_FMT KERN_NOTICE MYNAM ": %s: " +#define MYIOC_s_WARN_FMT KERN_WARNING MYNAM ": %s: WARNING - " +#define MYIOC_s_ERR_FMT KERN_ERR MYNAM ": %s: ERROR - " /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -133,6 +174,7 @@ MPTSCSIH_DRIVER, /* MPT SCSI host (initiator) class */ MPTLAN_DRIVER, /* MPT LAN class */ MPTSTM_DRIVER, /* MPT SCSI target mode class */ + MPTDMP_DRIVER, /* MPT Dynamic Multi-pathing class */ MPTUNKNOWN_DRIVER } MPT_DRIVER_CLASS; @@ -145,10 +187,21 @@ struct _MPT_FRAME_HDR *forw; struct _MPT_FRAME_HDR *back; u32 arg1; + u32 pad; void *argp1; +#ifndef MPT_SCSI_USE_NEW_EH + void *argp2; +#endif } linkage; /* - * NOTE: On non-32-bit systems, where pointers are LARGE, + * NOTE: When request frames are free, on the linkage structure + * contets are valid. All other values are invalid. + * In particular, do NOT reply on offset [2] + * (in words) being the * message context. + * The message context must be reset (computed via base address + * + an offset) prior to issuing any command. + * + * NOTE2: On non-32-bit systems, where pointers are LARGE, * using the linkage pointers destroys our sacred MsgContext * field contents. But we don't care anymore because these * are now reset in mpt_put_msg_frame() just prior to sending @@ -169,6 +222,12 @@ } fld; } msgctxu; } hwhdr; + /* + * Remark: 32 bit identifier: + * 31-24: reserved + * 23-16: call back index + * 15-0 : request index + */ } MPT_FRAME_TRACKER; /* @@ -189,6 +248,11 @@ } u; } MPT_FRAME_HDR; +#define MPT_REQ_MSGFLAGS_DROPME 0x80 + +/* Used for tracking the free request frames + * and free reply frames. 
+ */ typedef struct _MPT_Q_TRACKER { MPT_FRAME_HDR *head; MPT_FRAME_HDR *tail; @@ -214,9 +278,20 @@ struct _Q_ITEM *tail; } Q_TRACKER; +typedef struct _MPT_DONE_Q { + struct _MPT_DONE_Q *forw; + struct _MPT_DONE_Q *back; + void *argp; +} MPT_DONE_Q; + +typedef struct _DONE_Q_TRACKER { + MPT_DONE_Q *head; + MPT_DONE_Q *tail; +} DONE_Q_TRACKER; /* - * Chip-specific stuff... + * Chip-specific stuff... FC929 delineates break between + * FC and Parallel SCSI parts. Do NOT re-order. */ typedef enum { @@ -237,7 +312,9 @@ u32 WriteSequence; /* 04 Write Sequence register */ u32 Diagnostic; /* 08 Diagnostic register */ u32 TestBase; /* 0C Test Base Address */ - u32 Reserved1[8]; /* 10-2F reserved for future use */ + u32 DiagRwData; /* 10 Read Write Data (fw download) */ + u32 DiagRwAddress; /* 14 Read Write Address (fw download)*/ + u32 Reserved1[6]; /* 18-2F reserved for future use */ u32 IntStatus; /* 30 Interrupt Status */ u32 IntMask; /* 34 Interrupt Mask */ u32 Reserved2[2]; /* 38-3F reserved for future use */ @@ -256,60 +333,271 @@ */ +/* + * Dynamic Multi-Pathing specific stuff... 
+ */ +#define DMP_MAX_PATHS 8 + +typedef struct _PathInfo { + u8 ioc; + u8 target; + u8 pad; + u8 pflags; +} PathInfo; + +#define PATHINFO_FLAGS_OWNED 0x01 +#define PATHINFO_FLAGS_EXISTS 0x02 +#define PATHINFO_FLAGS_AVAILABLE 0x04 +#define PATHINFO_FLAGS_SECONDARY 0x08 + +#define PFLAGS_EXISTS_AND_AVAIL (PATHINFO_FLAGS_EXISTS|PATHINFO_FLAGS_AVAILABLE) +#define PFLAGS_AVAIL_AND_OWNED (PATHINFO_FLAGS_AVAILABLE|PATHINFO_FLAGS_OWNED) + +typedef struct _ScsiCmndTracker { + void *head; + void *tail; +} ScsiCmndTracker; + + +/* + * VirtDevice - FC LUN device or SCSI target device + * (used to be FCSCSI_TARGET) + */ +typedef struct _VirtDevice { + struct _VirtDevice *forw; + struct _VirtDevice *back; + rwlock_t VdevLock; + int ref_cnt; + u8 tflags; + u8 ioc_id; + u8 target_id; + u8 bus_id; + u8 minSyncFactor; /* 0xFF is async */ + u8 maxOffset; /* 0 if async */ + u8 maxWidth; /* 0 if narrow, 1 if wide*/ + u8 negoFlags; /* 0 if WDTR/SDTR allowed */ + u8 raidVolume; /* set, if RAID Volume */ + u8 rsvd; /* alignment */ + u16 rsvd1raid; + int npaths; + u16 fc_phys_lun; + u16 fc_xlat_lun; + int stall_detected; + PathInfo path[DMP_MAX_PATHS]; + struct timer_list stall_timer; + struct timer_list retry_timer; + struct timer_list gone_timer; + ScsiCmndTracker WaitQ; + ScsiCmndTracker SentQ; + ScsiCmndTracker DoneQ; +//--- LUN split here? + u8 sense[SCSI_STD_SENSE_BYTES]; /* 18 */ + u8 rsvd2[2]; /* alignment */ + u32 luns; /* Max LUNs is 32 */ + u8 inq_data[SCSI_STD_INQUIRY_BYTES]; /* 36 */ + u8 pad0[4]; + u8 uniq_prepad[8]; + u8 inq00_data[20]; + u8 pad1[4]; + /* IEEE Registered Extended Identifier + obtained via INQUIRY VPD page 0x83 */ + u8 uniq_data[20]; + u8 pad2[4]; + u8 inqC3_data[12]; + u8 pad3[4]; + u8 inqC9_data[12]; + u8 pad4[4]; + u8 dev_vol_name[64]; +} VirtDevice; + +/* + * Fibre Channel (SCSI) target device and associated defines... 
+ */ +#define MPT_TARGET_DEFAULT_DV_STATUS 0 +#define MPT_TARGET_FLAGS_VALID_NEGO 0x01 +#define MPT_TARGET_FLAGS_VALID_INQUIRY 0x02 +#define MPT_TARGET_FLAGS_VALID_SENSE 0x04 +#define MPT_TARGET_FLAGS_Q_YES 0x08 + +#define MPT_TARGET_NO_NEGO_WIDE 0x01 +#define MPT_TARGET_NO_NEGO_SYNC 0x02 + +typedef struct _VirtDevTracker { + struct _VirtDevice *head; + struct _VirtDevice *tail; + rwlock_t VlistLock; + int pad; +} VirtDevTracker; + + +/* + * /proc/mpt interface + */ +typedef struct { + const char *name; + mode_t mode; + int pad; + read_proc_t *read_proc; + write_proc_t *write_proc; +} mpt_proc_entry_t; + +#define MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len) \ +do { \ + len -= offset; \ + if (len < request) { \ + *eof = 1; \ + if (len <= 0) \ + return 0; \ + } else \ + len = request; \ + *start = buf + offset; \ + return len; \ +} while (0) + + +/* + * IOCTL structure and associated defines + */ + +#define MPT_IOCTL_STATUS_DID_TIMEOUT 0x01 /* The current IOCTL timed out */ +#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ +#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */ +#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */ +#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ + +typedef struct _MPT_IOCTL { + struct _MPT_ADAPTER *ioc; + struct timer_list timer; /* timer function for this adapter */ + u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ + u8 sense[MPT_SENSE_BUFFER_ALLOC]; + int wait_done; /* wake-up value for this ioc */ + u8 cmd; /* current command */ + u8 status; /* current command status */ + u8 pad[2]; +} MPT_IOCTL; + +/* + * Event Structure and define + */ +#define MPTCTL_EVENT_LOG_SIZE (0x0000000A) +typedef struct _mpt_ioctl_events { + u32 event; /* Specified by define above */ + u32 eventContext; /* Index or counter */ + int data[2]; /* First 8 bytes of Event Data */ +} MPT_IOCTL_EVENTS; + +/* + * CONFIGPARM status defines + */ +#define MPT_CONFIG_GOOD 
MPI_IOCSTATUS_SUCCESS +#define MPT_CONFIG_ERROR 0x002F + +/* + * Substructure to store SCSI specific configuration page data + */ +#define MPT_SCSICFG_NEGOTIATE 0x01 /* Negotiate on next IO */ +#define MPT_SCSICFG_NEED_DV 0x02 /* Schedule DV */ +#define MPT_SCSICFG_DV_PENDING 0x04 /* DV on this physical id pending */ +#define MPT_SCSICFG_DV_DONE 0x08 /* DV on this physical id complete */ + +#define MPT_SCSICFG_USE_NVRAM 0x01 /* WriteSDP1 using NVRAM */ +#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */ + +typedef struct _ScsiCfgData { + int *nvram; /* table of device NVRAM values */ + IOCPage3_t *pIocPg3; /* table of physical disks */ + u8 dvStatus[MPT_MAX_SCSI_DEVICES]; + int isRaid; /* bit field, 1 if RAID */ + u8 minSyncFactor; /* 0xFF if async */ + u8 maxSyncOffset; /* 0 if async */ + u8 maxBusWidth; /* 0 if narrow, 1 if wide */ + u8 busType; /* SE, LVD, HD */ + u8 sdp1version; /* SDP1 version */ + u8 sdp1length; /* SDP1 length */ + u8 sdp0version; /* SDP0 version */ + u8 sdp0length; /* SDP0 length */ + u8 dvScheduled; /* 1 if scheduled */ + u8 forceDv; /* 1 to force DV scheduling */ + u8 rsvd[2]; +} ScsiCfgData; + +/* + * Adapter Structure - pci_dev specific. 
Maximum: MPT_MAX_ADAPTERS + */ typedef struct _MPT_ADAPTER { struct _MPT_ADAPTER *forw; struct _MPT_ADAPTER *back; - int id; /* Unique adapter id {0,1,2,...} */ - int pci_irq; - char name[32]; /* "iocN" */ + int id; /* Unique adapter id N {0,1,2,...} */ + int pci_irq; /* This irq */ + char name[MPT_NAME_LENGTH]; /* "iocN" */ char *prod_name; /* "LSIFC9x9" */ - u32 mem_phys; /* == f4020000 (mmap) */ volatile SYSIF_REGS *chip; /* == c8817000 (mmap) */ - CHIP_TYPE chip_type; - int mem_size; + volatile SYSIF_REGS *pio_chip; /* Programmed IO (downloadboot) */ + u32 mem_phys; /* == f4020000 (mmap) */ + u32 pio_mem_phys; /* Programmed IO (downloadboot) */ + int mem_size; /* mmap memory size */ int alloc_total; u32 last_state; int active; - int sod_reset; - unsigned long last_kickstart; - u8 *reply_alloc; /* Reply frames alloc ptr */ + u8 *reply_alloc; /* Reply frames alloc ptr */ dma_addr_t reply_alloc_dma; - MPT_FRAME_HDR *reply_frames; /* Reply frames - rounded up! */ + MPT_FRAME_HDR *reply_frames; /* Reply msg frames - rounded up! */ dma_addr_t reply_frames_dma; - int reply_depth; - int reply_sz; + u32 reply_frames_low_dma; + int reply_depth; /* Num Allocated reply frames */ + int reply_sz; /* Reply frame size */ + CHIP_TYPE chip_type; /* We (host driver) get to manage our own RequestQueue! */ - u8 *req_alloc; /* Request frames alloc ptr */ + u8 *req_alloc; /* Request frames alloc ptr */ dma_addr_t req_alloc_dma; - MPT_FRAME_HDR *req_frames; /* Request msg frames for PULL mode! */ + MPT_FRAME_HDR *req_frames; /* Request msg frames - rounded up! */ dma_addr_t req_frames_dma; - int req_depth; - int req_sz; - MPT_Q_TRACKER FreeQ; + u32 req_frames_low_dma; + int req_depth; /* Number of request frames */ + int req_sz; /* Request frame size (bytes) */ spinlock_t FreeQlock; + MPT_Q_TRACKER FreeQ; /* Pool of SCSI sense buffers for commands coming from * the SCSI mid-layer. We have one 256 byte sense buffer * for each REQ entry. 
*/ u8 *sense_buf_pool; dma_addr_t sense_buf_pool_dma; - struct pci_dev *pcidev; -/* atomic_t userCnt; */ - u8 *memmap; + u32 sense_buf_low_dma; int mtrr_reg; - struct Scsi_Host *sh; + void *pcidev; /* struct pci_dev pointer */ + u8 *memmap; /* mmap address */ + struct Scsi_Host *sh; /* Scsi Host pointer */ + ScsiCfgData spi_data; /* Scsi config. data */ + MPT_IOCTL *ioctl; /* ioctl data pointer */ struct proc_dir_entry *ioc_dentry; - struct _MPT_ADAPTER *alt_ioc; + struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ + spinlock_t diagLock; /* diagnostic reset lock */ + int diagPending; + u32 biosVersion; /* BIOS version from IO Unit Page 2 */ + int eventTypes; /* Event logging parameters */ + int eventContext; /* Next event context */ + int eventLogSize; /* Max number of cached events */ + struct _mpt_ioctl_events *events; /* pointer to event log */ + u8 *FWImage; /* Pointer to FW */ + dma_addr_t FWImage_dma; + Q_TRACKER configQ; /* linked list of config. requests */ int hs_reply_idx; +#ifndef MFCNT + u32 pad0; +#else + u32 mfcnt; +#endif u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)]; u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)]; IOCFactsReply_t facts; PortFactsReply_t pfacts[2]; + FCPortPage0_t fc_port_page0[2]; LANPage0_t lan_cnfg_page0; LANPage1_t lan_cnfg_page1; u8 FirstWhoInit; - u8 pad1[3]; + u8 pad1[7]; } MPT_ADAPTER; @@ -324,7 +612,6 @@ * 0 = not Ok ... */ typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); - typedef int (*MPT_EVHANDLER)(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply); typedef int (*MPT_RESETHANDLER)(MPT_ADAPTER *ioc, int reset_phase); /* reset_phase defs */ @@ -344,6 +631,47 @@ #define MPT_HOSTEVENT_IOC_BRINGUP 0x91 #define MPT_HOSTEVENT_IOC_RECOVER 0x92 +/* 32 vs 64 bit SGL code. 
+ * + */ +#if defined(__ia64__) +typedef SGESimple64_t MptSge_t; +typedef SGEChain64_t MptChain_t; + +#define cpu_to_leXX(y, p) { \ + u32 low = (u32) (y & 0xFFFFFFFF); \ + u32 high = (u32) (y >> 32); \ + p.Low = cpu_to_le32(low); \ + p.High = cpu_to_le32(high); \ +} + +#define leXX_to_cpu(y, p) { \ + y = (dma_addr_t) le32_to_cpu(p.High); \ + y = (y << 32); \ + y |= le32_to_cpu(p.Low); \ +} + +#define MPT_SGE_ADDRESS_SIZE MPI_SGE_FLAGS_64_BIT_ADDRESSING +#define MPT_SCSIIO_MSG_FLAGS MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 + + +#else + +typedef SGESimple32_t MptSge_t; +typedef SGEChain32_t MptChain_t; +#define cpu_to_leXX(y,p) { \ + p = cpu_to_le32(y); \ +} + +#define leXX_to_cpu(y,p) { \ + y = le32_to_cpu(p); \ +} + +#define MPT_SGE_ADDRESS_SIZE MPI_SGE_FLAGS_32_BIT_ADDRESSING +#define MPT_SCSIIO_MSG_FLAGS MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 + +#endif + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Funky (private) macros... @@ -360,7 +688,8 @@ #define dhsprintk(x) #endif -#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME) +//#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME) +#if defined(MPT_DEBUG_MSG_FRAME) #define dmfprintk(x) printk x #else #define dmfprintk(x) @@ -372,24 +701,35 @@ #define dirqprintk(x) #endif -#ifdef MPT_DEBUG_EVENTS -#define deventprintk(x) printk x +#ifdef MPT_DEBUG_SG +#define dsgprintk(x) printk x #else -#define deventprintk(x) +#define dsgprintk(x) #endif -#ifdef MPT_DEBUG_SPINLOCK -#define dslprintk(x) printk x +#ifdef MPT_DEBUG_DV +#define ddvprintk(x) printk x #else -#define dslprintk(x) +#define ddvprintk(x) #endif -#ifdef MPT_DEBUG_SG -#define dsgprintk(x) printk x +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) +#define ddvtprintk(x) printk x #else -#define dsgprintk(x) +#define ddvtprintk(x) #endif +#ifdef MPT_DEBUG_IOCTL +#define dctlprintk(x) printk x +#else +#define dctlprintk(x) +#endif + +#ifdef MPT_DEBUG_RESET +#define dtmprintk(x) printk x +#else +#define dtmprintk(x) +#endif 
#define MPT_INDEX_2_MFPTR(ioc,idx) \ (MPT_FRAME_HDR*)( (u8*)(ioc)->req_frames + (ioc)->req_sz * (idx) ) @@ -397,6 +737,9 @@ #define MFPTR_2_MPT_INDEX(ioc,mf) \ (int)( ((u8*)mf - (u8*)(ioc)->req_frames) / (ioc)->req_sz ) +#define MPT_INDEX_2_RFPTR(ioc,idx) \ + (MPT_FRAME_HDR*)( (u8*)(ioc)->reply_frames + (ioc)->req_sz * (idx) ) + #define Q_INIT(q,type) (q)->head = (q)->tail = (type*)(q) #define Q_IS_EMPTY(q) ((Q_ITEM*)(q)->head == (Q_ITEM*)(q)) @@ -425,7 +768,6 @@ _forw->back = _back; \ } - #define SWAB4(value) \ (u32)( (((value) & 0x000000ff) << 24) \ | (((value) & 0x0000ff00) << 8) \ @@ -457,64 +799,143 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -#endif /* } __KERNEL__ */ +/* + * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers + * Private to the driver. + */ +/* LOCAL structure and fields used when processing + * internally generated commands. These include: + * bus scan, dv and config requests. + */ +typedef struct _MPT_LOCAL_REPLY { + ConfigPageHeader_t header; + int completion; + u8 sense[SCSI_STD_SENSE_BYTES]; + u8 scsiStatus; + u8 skip; + u32 pad; +} MPT_LOCAL_REPLY; + +#define MPT_HOST_BUS_UNKNOWN (0xFF) +#define MPT_HOST_TOO_MANY_TM (0x05) +#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF) +#define MPT_HOST_NO_CHAIN (0xFFFFFFFF) +#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF) +#define MPT_NVRAM_SYNC_MASK (0x0000FF00) +#define MPT_NVRAM_SYNC_SHIFT (8) +#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000) +#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000) +#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000) +#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000) +#define MPT_NVRAM_WIDE_DISABLE (0x00100000) +#define MPT_NVRAM_BOOT_CHOICE (0x00200000) + +typedef struct _MPT_SCSI_HOST { + MPT_ADAPTER *ioc; + int port; + u32 pad0; + struct scsi_cmnd **ScsiLookup; + /* Pool of buffers for chaining. ReqToChain + * and ChainToChain track index of chain buffers. + * ChainBuffer (DMA) virt/phys addresses. 
+ * FreeChainQ (lock) locking mechanisms. + */ + int *ReqToChain; + int *ChainToChain; + u8 *ChainBuffer; + dma_addr_t ChainBufferDMA; + MPT_Q_TRACKER FreeChainQ; + spinlock_t FreeChainQlock; + u32 qtag_tick; + VirtDevice **Targets; + MPT_LOCAL_REPLY *pLocal; /* used for internal commands */ + struct timer_list timer; + struct timer_list TMtimer; /* Timer for TM commands ONLY */ + /* Pool of memory for holding SCpnts before doing + * OS callbacks. freeQ is the free pool. + */ + u8 *memQ; + DONE_Q_TRACKER freeQ; + DONE_Q_TRACKER doneQ; /* Holds Linux formmatted requests */ + DONE_Q_TRACKER pendingQ; /* Holds MPI formmatted requests */ + MPT_Q_TRACKER taskQ; /* TM request Q */ + spinlock_t freedoneQlock; + int taskQcnt; + u8 numTMrequests; + u8 tmPending; + u8 resetPending; + u8 is_spi; /* Parallel SCSI i/f */ + u8 negoNvram; /* DV disabled, nego NVRAM */ + u8 is_multipath; /* Multi-path compatible */ + u8 rsvd[2]; + MPT_FRAME_HDR *tmPtr; /* Ptr to TM request*/ + MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ + struct scsi_cmnd *abortSCpnt; + MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ +} MPT_SCSI_HOST; + +/* + * Structure for overlaying onto scsi_cmnd->SCp area + * NOTE: SCp area is 36 bytes min, 44 bytes max? + */ +typedef struct _scPrivate { + struct scsi_cmnd *forw; + struct scsi_cmnd *back; + void *p1; + void *p2; + u8 io_path_id; /* DMP */ + u8 pad[7]; +} scPrivate; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * More Dynamic Multi-Pathing stuff... 
+ */ + +/* Forward decl, a strange C thing, to prevent gcc compiler warnings */ +struct scsi_cmnd; /* - * MPT Control IOCTLs and structures + * DMP service layer structure / API interface */ -#define MPT_MAGIC_NUMBER 'm' -#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w) -#define MPTRWPERF_CHK _IOR(MPT_MAGIC_NUMBER,13,struct mpt_raw_r_w) -#define MPTRWPERF_RESET _IOR(MPT_MAGIC_NUMBER,14,struct mpt_raw_r_w) -#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer) -#define MPTSCSICMD _IOWR(MPT_MAGIC_NUMBER,16,struct mpt_scsi_cmd) - -/* - * Define something *vague* enough that caller doesn't - * really need to know anything about device parameters - * (blk_size, capacity, etc.) - */ -struct mpt_raw_r_w { - unsigned int iocnum; /* IOC unit number */ - unsigned int port; /* IOC port number */ - unsigned int target; /* SCSI Target */ - unsigned int lun; /* SCSI LUN */ - unsigned int iters; /* N iterations */ - unsigned short nblks; /* number of blocks per IO */ - unsigned short qdepth; /* max Q depth on this device */ - unsigned char range; /* 0-100% of FULL disk capacity, 0=use (nblks X iters) */ - unsigned char skip; /* % of disk to skip */ - unsigned char rdwr; /* 0-100%, 0=pure ReaDs, 100=pure WRites */ - unsigned char seqran; /* 0-100%, 0=pure SEQential, 100=pure RANdom */ - unsigned int cache_sz; /* In Kb! 
Optimize hits to N Kb cache size */ -}; - -struct mpt_fw_xfer { - unsigned int iocnum; /* IOC unit number */ -/* u8 flags;*/ /* Message flags - bit field */ - unsigned int fwlen; - void *bufp; /* Pointer to firmware buffer */ -}; - -struct mpt_scsi_cmd { - unsigned int iocnum; /* IOC unit number */ - unsigned int port; /* IOC port number */ - unsigned int target; /* SCSI Target */ - unsigned int lun; /* SCSI LUN */ - SCSIIORequest_t scsi_req; - SCSIIOReply_t scsi_reply; -}; - -struct mpt_ioctl_sanity { - unsigned int iocnum; -}; +typedef struct _DmpServices { + VirtDevTracker VdevList; + struct semaphore *Daemon; + int (*ScsiPathSelect) + (struct scsi_cmnd *, MPT_SCSI_HOST **hd, int *target, int *lun); + int (*DmpIoDoneChk) + (MPT_SCSI_HOST *, struct scsi_cmnd *, + SCSIIORequest_t *, + SCSIIOReply_t *); + void (*mptscsih_scanVlist) + (MPT_SCSI_HOST *, int portnum); + int (*ScsiAbort) + (struct scsi_cmnd *); + int (*ScsiBusReset) + (struct scsi_cmnd *); +} DmpServices_t; -#ifdef __KERNEL__ /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Generic structure passed to the base mpt_config function. + */ +typedef struct _x_config_parms { + Q_ITEM linkage; /* linked list */ + struct timer_list timer; /* timer function for this request */ + ConfigPageHeader_t *hdr; + dma_addr_t physAddr; + int wait_done; /* wait for this request */ + u32 pageAddr; /* properly formatted */ + u8 action; + u8 dir; + u8 timeout; /* seconds */ + u8 pad1; + u16 status; + u16 pad2; +} CONFIGPARMS; +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * Public entry points... 
*/ @@ -524,21 +945,28 @@ extern void mpt_event_deregister(int cb_idx); extern int mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func); extern void mpt_reset_deregister(int cb_idx); -extern int mpt_register_ascqops_strings(/*ASCQ_Table_t*/void *ascqTable, int ascqtbl_sz, const char **opsTable); +extern int mpt_register_ascqops_strings(void *ascqTable, int ascqtbl_sz, const char **opsTable); extern void mpt_deregister_ascqops_strings(void); extern MPT_FRAME_HDR *mpt_get_msg_frame(int handle, int iocid); extern void mpt_free_msg_frame(int handle, int iocid, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame(int handle, int iocid, MPT_FRAME_HDR *mf); -extern int mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req); +extern int mpt_send_handshake_request(int handle, int iocid, int reqBytes, u32 *req, int sleepFlag); +extern int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait, int sleepFlag); extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); extern MPT_ADAPTER *mpt_adapter_find_first(void); extern MPT_ADAPTER *mpt_adapter_find_next(MPT_ADAPTER *prev); +extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked); extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan); -extern void mpt_print_ioc_facts(MPT_ADAPTER *ioc, char *buf, int *size, int len); +extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); +extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg); /* * Public data decl's... 
*/ +extern MPT_ADAPTER *mpt_adapters[MPT_MAX_ADAPTERS]; +extern struct proc_dir_entry *mpt_proc_root_dir; +extern DmpServices_t *DmpService; + extern int mpt_lan_index; /* needed by mptlan.c */ extern int mpt_stm_index; /* needed by mptstm.c */ @@ -563,7 +991,7 @@ #define offsetof(t, m) ((size_t) (&((t *)0)->m)) #endif -#if defined(__alpha__) || defined(__sparc_v9__) +#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) #define CAST_U32_TO_PTR(x) ((void *)(u64)x) #define CAST_PTR_TO_U32(x) ((u32)(u64)x) #else @@ -576,6 +1004,40 @@ ((pflags) & MPI_PORTFACTS_PROTOCOL_TARGET) ? 'T' : 't', \ ((pflags) & MPI_PORTFACTS_PROTOCOL_LAN) ? 'L' : 'l', \ ((pflags) & MPI_PORTFACTS_PROTOCOL_LOGBUSADDR) ? 'B' : 'b' + +/* + * Shifted SGE Defines - Use in SGE with FlagsLength member. + * Otherwise, use MPI_xxx defines (refer to "lsi/mpi.h" header). + * Defaults: 32 bit SGE, SYSTEM_ADDRESS if direction bit is 0, read + */ +#define MPT_TRANSFER_IOC_TO_HOST (0x00000000) +#define MPT_TRANSFER_HOST_TO_IOC (0x04000000) +#define MPT_SGE_FLAGS_LAST_ELEMENT (0x80000000) +#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) +#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) +#define MPT_SGE_FLAGS_DIRECTION (0x04000000) +#define MPT_SGE_FLAGS_ADDRESSING (MPT_SGE_ADDRESS_SIZE << MPI_SGE_FLAGS_SHIFT) +#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) + +#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) +#define MPT_SGE_FLAGS_SIMPLE_ELEMENT (0x10000000) +#define MPT_SGE_FLAGS_CHAIN_ELEMENT (0x30000000) +#define MPT_SGE_FLAGS_ELEMENT_MASK (0x30000000) + +#define MPT_SGE_FLAGS_SSIMPLE_READ \ + (MPT_SGE_FLAGS_LAST_ELEMENT | \ + MPT_SGE_FLAGS_END_OF_BUFFER | \ + MPT_SGE_FLAGS_END_OF_LIST | \ + MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ + MPT_TRANSFER_IOC_TO_HOST) +#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ + (MPT_SGE_FLAGS_LAST_ELEMENT | \ + MPT_SGE_FLAGS_END_OF_BUFFER | \ + MPT_SGE_FLAGS_END_OF_LIST | \ + MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ + 
MPT_TRANSFER_HOST_TO_IOC) /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #endif diff -Nru a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c --- a/drivers/message/fusion/mptctl.c Thu May 30 21:28:58 2002 +++ b/drivers/message/fusion/mptctl.c Thu May 30 21:28:58 2002 @@ -9,6 +9,12 @@ * This driver would not exist if not for Alan Cox's development * of the linux i2o driver. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. + * * A huge debt of gratitude is owed to David S. Miller (DaveM) * for fixing much of the stupid and broken stuff in the early * driver while porting to sparc64 platform. THANK YOU! @@ -18,16 +24,17 @@ * (plus Eddie's other helpful hints and insights) * * Thanks to Arnaldo Carvalho de Melo for finding and patching - * a potential memory leak in mpt_ioctl_do_fw_download(), + * a potential memory leak in mptctl_do_fw_download(), * and for some kmalloc insight:-) * * (see also mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. 
Ralston, Noah Romer - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptctl.c,v 1.25.4.1 2001/08/24 20:07:06 sralston Exp $ + * $Id: mptctl.c,v 1.52 2002/02/27 18:44:24 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -69,7 +76,6 @@ #include #include #include -#include #include #include #include @@ -80,11 +86,16 @@ #include #include -#include +#include /* needed for access to Scsi_Host struct */ +#include +#include /* for io_request_lock (spinlock) decl */ +#include "../../scsi/scsi.h" +#include "../../scsi/hosts.h" #define COPYRIGHT "Copyright (c) 1999-2001 LSI Logic Corporation" -#define MODULEAUTHOR "Steven J. Ralston, Noah Romer" +#define MODULEAUTHOR "Steven J. Ralston, Noah Romer, Pamela Delaney" #include "mptbase.h" +#include "mptctl.h" /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #define my_NAME "Fusion MPT misc device (ioctl) driver" @@ -95,21 +106,59 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptctl_id = -1; -static int rwperf_reset = 0; static struct semaphore mptctl_syscall_sem_ioc[MPT_MAX_ADAPTERS]; +static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static int mpt_ioctl_rwperf(unsigned long arg); -static int mpt_ioctl_rwperf_status(unsigned long arg); -static int mpt_ioctl_rwperf_reset(unsigned long arg); -static int mpt_ioctl_fw_download(unsigned long arg); -static int mpt_ioctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen); -static int mpt_ioctl_scsi_cmd(unsigned long arg); +struct buflist { + u8 *kptr; + int len; +}; + +/* + * Function prototypes. Called from OS entry point mptctl_ioctl. + * arg contents specific to function. 
+ */ +static int mptctl_fw_download(unsigned long arg); +static int mptctl_getiocinfo (unsigned long arg); +static int mptctl_gettargetinfo (unsigned long arg); +static int mptctl_readtest (unsigned long arg); +static int mptctl_mpt_command (unsigned long arg); +static int mptctl_eventquery (unsigned long arg); +static int mptctl_eventenable (unsigned long arg); +static int mptctl_eventreport (unsigned long arg); +static int mptctl_replace_fw (unsigned long arg); + +static int mptctl_do_reset(unsigned long arg); + +static int mptctl_compaq_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int mptctl_cpq_getpciinfo(unsigned long arg); +static int mptctl_cpq_getdriver(unsigned long arg); +static int mptctl_cpq_ctlr_status(unsigned long arg); +static int mptctl_cpq_target_address(unsigned long arg); +static int mptctl_cpq_passthru(unsigned long arg); +static int mptctl_compaq_scsiio(VENDOR_IOCTL_REQ *pVenReq, cpqfc_passthru_t *pPass); + +/* + * Private function calls. + */ +static int mptctl_do_mpt_command (struct mpt_ioctl_command karg, char *mfPtr, int local); +static int mptctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen); +static MptSge_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int sge_offset, int *frags, + struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); +static void kfree_sgl( MptSge_t *sgl, dma_addr_t sgl_dma, + struct buflist *buflist, MPT_ADAPTER *ioc); +static void mptctl_timer_expired (unsigned long data); + +/* + * Reset Handler cleanup function + */ +static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -132,26 +181,27 @@ /* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */ #define MAX_KMALLOC_SZ (128*1024) -struct buflist { - u8 *kptr; - int len; -}; - -#define myMAX_TARGETS (1<<4) -#define myMAX_LUNS (1<<3) -#define myMAX_T_MASK (myMAX_TARGETS-1) -#define myMAX_L_MASK (myMAX_LUNS-1) -static 
u8 DevInUse[myMAX_TARGETS][myMAX_LUNS] = {{0,0}}; -static u32 DevIosCount[myMAX_TARGETS][myMAX_LUNS] = {{0,0}}; +#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */ static u32 fwReplyBuffer[16]; static pMPIDefaultReply_t ReplyMsg = NULL; -/* some private forw protos */ -static SGESimple32_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int *frags, - struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); -static void kfree_sgl( SGESimple32_t *sgl, dma_addr_t sgl_dma, - struct buflist *buflist, MPT_ADAPTER *ioc); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Function to return 0 if the sge Address member is 0 and + * non-zero else. Used in the mpt_do_fw_download routines. + */ +static inline int +mptctl_test_address(MptSge_t *sge) +{ +#ifdef __ia64__ + if ((sge->Address.Low) || (sge->Address.High)) + return 1; + else + return 0; +#else + return sge->Address; +#endif +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** @@ -159,7 +209,7 @@ * @ioc: Pointer to MPT adapter * @nonblock: boolean, non-zero if O_NONBLOCK is set * - * All of the mptctl commands can potentially sleep, which is illegal + * All of the ioctl commands can potentially sleep, which is illegal * with a spinlock held, thus we perform mutual exclusion here. * * Returns negative errno on error, or zero for success. 
@@ -167,16 +217,27 @@ static inline int mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) { - dprintk((KERN_INFO MYNAM "::mpt_syscall_down(%p,%d) called\n", ioc, nonblock)); + int rc = 0; + dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down(%p,%d) called\n", ioc, nonblock)); +#if defined(__sparc__) && defined(__sparc_v9__) /*{*/ + if (!nonblock) { + if (down_interruptible(&mptctl_syscall_sem_ioc[ioc->id])) + rc = -ERESTARTSYS; + } else { + rc = -EPERM; + } +#else if (nonblock) { if (down_trylock(&mptctl_syscall_sem_ioc[ioc->id])) - return -EAGAIN; + rc = -EAGAIN; } else { if (down_interruptible(&mptctl_syscall_sem_ioc[ioc->id])) - return -ERESTARTSYS; + rc = -ERESTARTSYS; } - return 0; +#endif + dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down return %d\n", rc)); + return rc; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -189,18 +250,150 @@ static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - u8 targ; + char *sense_data; + int sz, req_index; + u16 iocStatus; + u8 cmd; + + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_reply()!\n", ioc->name)); + if (req) + cmd = req->u.hdr.Function; + else + return 1; - //dprintk((KERN_DEBUG MYNAM ": Got mptctl_reply()!\n")); + if (ioc->ioctl) { + /* If timer is not running, then an error occurred. + * A timeout will call the reset routine to reload the messaging + * queues. + * Main callback will free message and reply frames. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE) { + /* Delete this timer + */ + del_timer (&ioc->ioctl->timer); + ioc->ioctl->status &= ~MPT_IOCTL_STATUS_TIMER_ACTIVE; + + /* Set the overall status byte. Good if: + * IOC status is good OR if no reply and a SCSI IO request + */ + if (reply) { + /* Copy the reply frame (which much exist + * for non-SCSI I/O) to the IOC structure. 
+ */ + dctlprintk((MYIOC_s_INFO_FMT ": Copying Reply Frame @%p to IOC!\n", + ioc->name, reply)); + memcpy(ioc->ioctl->ReplyFrame, reply, + MIN(ioc->reply_sz, 4*reply->u.reply.MsgLength)); + ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID; + + /* Set the command status to GOOD if IOC Status is GOOD + * OR if SCSI I/O cmd and data underrun or recovered error. + */ + iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK; + if (iocStatus == MPI_IOCSTATUS_SUCCESS) + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + + if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) || + (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) { + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + } + } + + /* Copy the sense data - if present + */ + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && + (reply->u.sreply.SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)){ + + sz = req->u.scsireq.SenseBufferLength; + req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); + sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); + memcpy(ioc->ioctl->sense, sense_data, sz); + ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; + } + } else if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + } - if (req && req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) { - targ = req->u.scsireq.TargetID & myMAX_T_MASK; - DevIosCount[targ][0]--; - } else if (reply && req && req->u.hdr.Function == MPI_FUNCTION_FW_DOWNLOAD) { - // NOTE: Expects/requires non-Turbo reply! 
- dprintk((KERN_INFO MYNAM ": Caching MPI_FUNCTION_FW_DOWNLOAD reply!\n")); - memcpy(fwReplyBuffer, reply, MIN(sizeof(fwReplyBuffer), 4*reply->u.reply.MsgLength)); - ReplyMsg = (pMPIDefaultReply_t) fwReplyBuffer; + /* We are done, issue wake up + */ + ioc->ioctl->wait_done = 1; + wake_up (&mptctl_wait); + } else if (reply && cmd == MPI_FUNCTION_FW_DOWNLOAD) { + /* Two paths to FW DOWNLOAD! */ + // NOTE: Expects/requires non-Turbo reply! + dctlprintk((MYIOC_s_INFO_FMT ":Caching MPI_FUNCTION_FW_DOWNLOAD reply!\n", + ioc->name)); + memcpy(fwReplyBuffer, reply, MIN(sizeof(fwReplyBuffer), 4*reply->u.reply.MsgLength)); + ReplyMsg = (pMPIDefaultReply_t) fwReplyBuffer; + } + } + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_timer_expired + * + * Call back for timer process. Used only for ioctl functionality. + * + */ +static void mptctl_timer_expired (unsigned long data) +{ + MPT_IOCTL *ioctl = (MPT_IOCTL *) data; + + dctlprintk((KERN_NOTICE MYNAM ": Timer Expired! Host %d\n", + ioctl->ioc->id)); + + /* Issue a reset for this device. + * The IOC is not responding. + */ + mpt_HardResetHandler(ioctl->ioc, NO_SLEEP); + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_ioc_reset + * + * Clean-up functionality. Used only if there has been a + * reload of the FW due. + * + */ +static int +mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) +{ + MPT_IOCTL *ioctl = ioc->ioctl; + dctlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to IOCTL driver!\n", + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + + if (reset_phase == MPT_IOC_PRE_RESET){ + + /* Someone has called the reset handler to + * do a hard reset. No more replies from the FW. + * Delete the timer. + */ + if (ioctl && (ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE)){ + + /* Delete this timer + */ + del_timer(&ioctl->timer); + } + + } else { + /* Set the status and continue IOCTL + * processing. 
All memory will be free'd + * by originating thread after wake_up is + * called. + */ + if (ioctl && (ioctl->status & MPT_IOCTL_STATUS_TIMER_ACTIVE)){ + ioctl->status = MPT_IOCTL_STATUS_DID_TIMEOUT; + + /* Wake up the calling process + */ + ioctl->wait_done = 1; + wake_up(&mptctl_wait); + } } return 1; @@ -208,7 +401,7 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * struct file_operations functionality. + * struct file_operations functionality. * Members: * llseek, write, read, ioctl, open, release */ @@ -234,63 +427,93 @@ static ssize_t mptctl_read(struct file *file, char *buf, size_t count, loff_t *ptr) { + printk(KERN_ERR MYNAM ": ioctl READ not yet supported\n"); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * MPT ioctl handler + * cmd - specify the particular IOCTL command to be issued + * arg - data specific to the command. Must not be null. */ static int -mpt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +mptctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - struct mpt_ioctl_sanity *usanity = (struct mpt_ioctl_sanity *) arg; - struct mpt_ioctl_sanity ksanity; + mpt_ioctl_header *uhdr = (mpt_ioctl_header *) arg; + mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; - dprintk((KERN_INFO MYNAM "::mpt_ioctl() called\n")); + dctlprintk(("mptctl_ioctl() called\n")); - if (copy_from_user(&ksanity, usanity, sizeof(ksanity))) { - printk(KERN_ERR "%s::mpt_ioctl() @%d - " - "Unable to copy mpt_ioctl_sanity data @ %p\n", - __FILE__, __LINE__, (void*)usanity); + if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { + printk(KERN_ERR "%s::mptctl_ioctl() @%d - " + "Unable to copy mpt_ioctl_header data @ %p\n", + __FILE__, __LINE__, (void*)uhdr); return -EFAULT; } ret = -ENXIO; /* (-6) No such device or address */ - /* Verify intended 
MPT adapter */ - iocnumX = ksanity.iocnum & 0xFF; + /* Test for Compaq-specific IOCTL's. + */ + if ((cmd == CPQFCTS_GETPCIINFO) || (cmd == CPQFCTS_CTLR_STATUS) || + (cmd == CPQFCTS_GETDRIVER) || (cmd == CPQFCTS_SCSI_PASSTHRU) || + (cmd == CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS)) + return mptctl_compaq_ioctl(file, cmd, arg); + + /* Verify intended MPT adapter - set iocnum and the adapter + * pointer (iocp) + */ + iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { - printk(KERN_ERR "%s::mpt_ioctl() @%d - ioc%d not found!\n", + printk(KERN_ERR "%s::mptctl_ioctl() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnumX); return -ENODEV; } + /* Handle those commands that are just returning + * information stored in the driver. + * These commands should never time out and are unaffected + * by TM and FW reloads. + */ + if (cmd == MPTIOCINFO) { + return mptctl_getiocinfo(arg); + } else if (cmd == MPTTARGETINFO) { + return mptctl_gettargetinfo(arg); + } else if (cmd == MPTTEST) { + return mptctl_readtest(arg); + } else if (cmd == MPTEVENTQUERY) { + return mptctl_eventquery(arg); + } else if (cmd == MPTEVENTENABLE) { + return mptctl_eventenable(arg); + } else if (cmd == MPTEVENTREPORT) { + return mptctl_eventreport(arg); + } else if (cmd == MPTFWREPLACE) { + return mptctl_replace_fw(arg); + } + + /* All of these commands require an interrupt or + * are unknown/illegal. 
+ */ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; - dprintk((KERN_INFO MYNAM "::mpt_ioctl() - Using %s\n", iocp->name)); + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_ioctl()\n", iocp->name)); switch(cmd) { - case MPTRWPERF: - ret = mpt_ioctl_rwperf(arg); - break; - case MPTRWPERF_CHK: - ret = mpt_ioctl_rwperf_status(arg); - break; - case MPTRWPERF_RESET: - ret = mpt_ioctl_rwperf_reset(arg); - break; case MPTFWDOWNLOAD: - ret = mpt_ioctl_fw_download(arg); + ret = mptctl_fw_download(arg); + break; + case MPTCOMMAND: + ret = mptctl_mpt_command(arg); break; - case MPTSCSICMD: - ret = mpt_ioctl_scsi_cmd(arg); + case MPTHARDRESET: + ret = mptctl_do_reset(arg); break; default: ret = -EINVAL; @@ -301,6 +524,36 @@ return ret; } +static int mptctl_do_reset(unsigned long arg) +{ + struct mpt_ioctl_diag_reset *urinfo = (struct mpt_ioctl_diag_reset *) arg; + struct mpt_ioctl_diag_reset krinfo; + MPT_ADAPTER *iocp; + + dctlprintk((KERN_INFO "mptctl_do_reset called.\n")); + + if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { + printk(KERN_ERR "%s@%d::mptctl_do_reset - " + "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", + __FILE__, __LINE__, (void*)urinfo); + return -EFAULT; + } + + if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { + printk(KERN_ERR "%s@%d::mptctl_do_reset - ioc%d not found!\n", + __FILE__, __LINE__, krinfo.hdr.iocnum); + return -ENXIO; /* (-6) No such device or address */ + } + + if (mpt_HardResetHandler(iocp, NO_SLEEP) != 0) { + printk (KERN_ERR "%s@%d::mptctl_do_reset - reset failed.\n", + __FILE__, __LINE__); + return -1; + } + + return 0; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptctl_open(struct inode *inode, struct file *file) { @@ -317,13 +570,29 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * MPT FW download function. Cast the arg into the mpt_fw_xfer structure. 
+ * This structure contains: iocnum, firmware length (bytes), + * pointer to user space memory where the fw image is stored. + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status + */ static int -mpt_ioctl_fw_download(unsigned long arg) +mptctl_fw_download(unsigned long arg) { struct mpt_fw_xfer *ufwdl = (struct mpt_fw_xfer *) arg; struct mpt_fw_xfer kfwdl; - dprintk((KERN_INFO "mpt_ioctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc + dctlprintk((KERN_INFO "mptctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) { printk(KERN_ERR "%s@%d::_ioctl_fwdl - " "Unable to copy mpt_fw_xfer struct @ %p\n", @@ -331,44 +600,52 @@ return -EFAULT; } - return mpt_ioctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); + return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * MPT FW Download + * FW Download engine. + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENXIO if no such device + * -EAGAIN if resource problem + * -ENOMEM if no memory for SGE + * -EMLINK if too many chain buffers required + * -EBADRQC if adapter does not support FW download + * -EBUSY if adapter is busy + * -ENOMSG if FW upload returned bad status */ static int -mpt_ioctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen) +mptctl_do_fw_download(int ioc, char *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; MPT_ADAPTER *iocp; -// char *fwbuf; -// dma_addr_t fwbuf_dma; - FWDownloadTCSGE_t *fwVoodoo; -// SGEAllUnion_t *fwSgl; + FWDownloadTCSGE_t *ptsge; + MptSge_t *sgl; + MptSge_t *sgOut, *sgIn; + struct buflist *buflist; + struct buflist *bl; + dma_addr_t sgl_dma; int ret; - - SGESimple32_t *sgl; - SGESimple32_t *sgOut, *sgIn; - dma_addr_t sgl_dma; - struct buflist *buflist = NULL; - struct buflist *bl = NULL; - int numfrags = 0; - int maxfrags; - int n = 0; - u32 sgdir; - u32 nib; - int fw_bytes_copied = 0; - u16 iocstat; - int i; - - dprintk((KERN_INFO "mpt_ioctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); - - dprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); - dprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); - dprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); + int numfrags = 0; + int maxfrags; + int n = 0; + u32 sgdir; + u32 nib; + int fw_bytes_copied = 0; + int i; + int cntdn; + int sge_offset = 0; + u16 iocstat; + + dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id)); + + dctlprintk((KERN_INFO "DbG: kfwdl.bufp = %p\n", ufwbuf)); + dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen)); + dctlprintk((KERN_INFO "DbG: kfwdl.ioc = %04xh\n", ioc)); if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) { printk("%s@%d::_ioctl_fwdl - ioc%d not found!\n", @@ -376,11 +653,13 @@ return -ENXIO; /* (-6) No such device or address */ } + /* Valid device. Get a message frame and construct the FW download message. 
+ */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) return -EAGAIN; dlmsg = (FWDownload_t*) mf; - fwVoodoo = (FWDownloadTCSGE_t *) &dlmsg->SGL; - sgOut = (SGESimple32_t *) (fwVoodoo + 1); + ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; + sgOut = (MptSge_t *) (ptsge + 1); /* * Construct f/w download request @@ -392,27 +671,36 @@ dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; dlmsg->MsgFlags = 0; - fwVoodoo->Reserved = 0; - fwVoodoo->ContextSize = 0; - fwVoodoo->DetailsLength = 12; - fwVoodoo->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; - fwVoodoo->Reserved1 = 0; - fwVoodoo->ImageOffset = 0; - fwVoodoo->ImageSize = cpu_to_le32(fwlen); + /* Set up the Transaction SGE. + */ + ptsge->Reserved = 0; + ptsge->ContextSize = 0; + ptsge->DetailsLength = 12; + ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; + ptsge->Reserved_0100_Checksum = 0; + ptsge->ImageOffset = 0; + ptsge->ImageSize = cpu_to_le32(fwlen); + + /* Add the SGL + */ /* * Need to kmalloc area(s) for holding firmware image bytes. * But we need to do it piece meal, using a proper * scatter gather list (with 128kB MAX hunks). - * + * * A practical limit here might be # of sg hunks that fit into * a single IOC request frame; 12 or 8 (see below), so: * For FC9xx: 12 x 128kB == 1.5 mB (max) * For C1030: 8 x 128kB == 1 mB (max) * We could support chaining, but things get ugly(ier:) + * + * Set the sge_offset to the start of the sgl (bytes). */ sgdir = 0x04000000; /* IOC will READ from sys mem */ - if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) + sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); + if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, + &numfrags, &buflist, &sgl_dma, iocp)) == NULL) return -ENOMEM; /* @@ -420,16 +708,19 @@ * for FC9xx f/w image, but calculate max number of sge hunks * we can fit into a request frame, and limit ourselves to that. 
* (currently no chain support) - * For FC9xx: (128-12-16)/8 = 12.5 = 12 - * For C1030: (96-12-16)/8 = 8.5 = 8 + * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE + * Request maxfrags + * 128 12 + * 96 8 + * 64 4 */ - maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / sizeof(SGESimple32_t); + maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / sizeof(MptSge_t); if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; } - dprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); + dctlprintk((KERN_INFO "DbG: sgl buffer = %p, sgfrags = %d\n", sgl, numfrags)); /* * Parse SG list, copying sgl itself, @@ -439,11 +730,17 @@ sgIn = sgl; bl = buflist; for (i=0; i < numfrags; i++) { - nib = (le32_to_cpu(sgIn->FlagsLength) & 0xF0000000) >> 28; - /* skip ignore/chain. */ + + /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE + * Skip everything but Simple. If simple, copy from + * user space into kernel space. + * Note: we should not have anything but Simple as + * Chain SGE are illegal. 
+ */ + nib = (le32_to_cpu(sgIn->FlagsLength) & 0x30000000) >> 28; if (nib == 0 || nib == 3) { ; - } else if (sgIn->Address) { + } else if (mptctl_test_address(sgIn)) { *sgOut = *sgIn; n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { @@ -478,26 +775,24 @@ /* * Wait until the reply has been received */ - { - int foo = 0; - - while (ReplyMsg == NULL) { - if (!(foo%1000000)) { - dprintk((KERN_INFO "DbG::_do_fwdl: " - "In ReplyMsg loop - iteration %d\n", - foo)); //tc - } + for (cntdn=HZ*60, i=1; ReplyMsg == NULL; cntdn--, i++) { + if (!cntdn) { ret = -ETIME; - if (++foo > 60000000) - goto fwdl_out; - mb(); - schedule(); - barrier(); + goto fwdl_out; + } + + if (!(i%HZ)) { + dctlprintk((KERN_INFO "DbG::_do_fwdl: " + "In ReplyMsg loop - iteration %d\n", + i)); } + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); } if (sgl) - kfree_sgl(sgl, sgl_dma, buflist, iocp); + kfree_sgl(sgl, sgl_dma, buflist, iocp); iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { @@ -527,32 +822,46 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * NEW rwperf (read/write performance) stuff starts here... + * SGE Allocation routine + * + * Inputs: bytes - number of bytes to be transferred + * sgdir - data direction + * sge_offset - offset (in bytes) from the start of the request + * frame to the first SGE + * ioc - pointer to the mptadapter + * Outputs: frags - number of scatter gather elements + * blp - point to the buflist pointer + * sglbuf_dma - pointer to the (dma) sgl + * Returns: Null if failes + * pointer to the (virtual) sgl if successful. 
*/ - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static SGESimple32_t * -kbuf_alloc_2_sgl(int bytes, u32 sgdir, int *frags, +static MptSge_t * +kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) { - SGESimple32_t *sglbuf = NULL; - struct buflist *buflist = NULL; + MptSge_t *sglbuf = NULL; /* pointer to array of SGE + * and chain buffers */ + struct buflist *buflist = NULL; /* kernel routine */ + MptSge_t *sgl; + MptChain_t *last_chain = NULL; int numfrags = 0; int fragcnt = 0; int alloc_sz = MIN(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg! int bytes_allocd = 0; int this_alloc; - SGESimple32_t *sgl; - u32 pa; // phys addr - SGEChain32_t *last_chain = NULL; - SGEChain32_t *old_chain = NULL; + dma_addr_t pa; // phys addr int chaincnt = 0; int i, buflist_ent; int sg_spill = MAX_FRAGS_SPILL1; int dir; + /* initialization */ *frags = 0; *blp = NULL; + + /* Allocate and initialize an array of kernel + * structures for the SG elements. + */ i = MAX_SGL_BYTES / 8; buflist = kmalloc(i, GFP_USER); if (buflist == NULL) @@ -560,6 +869,11 @@ memset(buflist, 0, i); buflist_ent = 0; + /* Allocate a single block of memory to store the sg elements and + * the chain buffers. The calling routine is responsible for + * copying the data in this array into the correct place in the + * request and chain buffers. + */ sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma); if (sglbuf == NULL) goto free_and_fail; @@ -569,7 +883,15 @@ else dir = PCI_DMA_FROMDEVICE; + /* At start: + * sgl = sglbuf = point to beginning of sg buffer + * buflist_ent = 0 = first kernel structure + * sg_spill = number of SGE that can be written before the first + * chain element. 
+ * + */ sgl = sglbuf; + sg_spill = ((ioc->req_sz - sge_offset)/ sizeof(MptSge_t)) - 1; while (bytes_allocd < bytes) { this_alloc = MIN(alloc_sz, bytes-bytes_allocd); buflist[buflist_ent].len = this_alloc; @@ -594,7 +916,7 @@ /* Write one SIMPLE sge */ sgl->FlagsLength = cpu_to_le32(0x10000000|sgdir|this_alloc); dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); - sgl->Address = cpu_to_le32(dma_addr); + cpu_to_leXX(dma_addr, sgl->Address); fragcnt++; numfrags++; @@ -609,24 +931,43 @@ if (fragcnt == sg_spill) { dma_addr_t chain_link; - if (last_chain != NULL) - last_chain->NextChainOffset = 0x1E; - - fragcnt = 0; - sg_spill = MAX_FRAGS_SPILL2; + /* If there is a chain element, set the offset + * (in 32 bit words) to the next chain element. + * fragcnt = # sge = 8 bytes = 2 words + * + * Set the length of the chain element (bytes) + * This includes the size of the next chain element. + * + * We are now done with last_chain and the previous + * buffer. + */ + if (last_chain != NULL) { + last_chain->NextChainOffset = fragcnt * 2; + last_chain->Length = cpu_to_le16((fragcnt+1) * 8); + } - /* fixup previous SIMPLE sge */ + /* Finish the current buffer: + * - add the LE bit to last sge + * - add the chain element + */ sgl[-1].FlagsLength |= cpu_to_le32(0x80000000); chain_link = (*sglbuf_dma) + ((u8 *)(sgl+1) - (u8 *)sglbuf); /* Write one CHAIN sge */ - sgl->FlagsLength = cpu_to_le32(0x30000080); - sgl->Address = cpu_to_le32(chain_link); +// sgl->FlagsLength = cpu_to_le32(0x30000080); + sgl->FlagsLength = cpu_to_le32(0x30000000); + cpu_to_leXX(chain_link, sgl->Address); + + /* Reset everything for the next SGE series, + * save a ptr to the chain element in last_chain + */ + fragcnt = 0; +// sg_spill = MAX_FRAGS_SPILL2; + sg_spill = (ioc->req_sz / sizeof(MptSge_t)) - 1; - old_chain = last_chain; - last_chain = (SGEChain32_t*)sgl; + last_chain = (MptChain_t*)sgl; chaincnt++; numfrags++; sgl++; @@ -646,18 +987,19 @@ /* Last sge fixup: set 
LE+eol+eob bits */ sgl[-1].FlagsLength |= cpu_to_le32(0xC1000000); - /* Chain fixup needed? */ - if (last_chain != NULL && fragcnt < 16) + /* Chain fixup needed? */ /* SteveR CHECKME!!! */ +// if (last_chain != NULL && fragcnt < 16) + if (last_chain != NULL) last_chain->Length = cpu_to_le16(fragcnt * 8); *frags = numfrags; *blp = buflist; - dprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " + dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " "%d SG frags generated! (%d CHAIN%s)\n", numfrags, chaincnt, chaincnt>1?"s":"")); - dprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " + dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - " "last (big) alloc_sz=%d\n", alloc_sz)); @@ -675,7 +1017,7 @@ if ((le32_to_cpu(sglbuf[i].FlagsLength) >> 24) == 0x30) continue; - dma_addr = le32_to_cpu(sglbuf[i].Address); + leXX_to_cpu(dma_addr, sglbuf[i].Address); kptr = buflist[i].kptr; len = buflist[i].len; @@ -688,16 +1030,19 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Routine to free the SGL elements. + */ static void -kfree_sgl(SGESimple32_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) +kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc) { - SGESimple32_t *sg = sgl; + MptSge_t *sg = sgl; struct buflist *bl = buflist; u32 nib; int dir; int n = 0; - if (le32_to_cpu(sg->FlagsLength) & 0x04000000) + if ((le32_to_cpu(sg->FlagsLength) & 0x04000000)) dir = PCI_DMA_TODEVICE; else dir = PCI_DMA_FROMDEVICE; @@ -707,12 +1052,12 @@ /* skip ignore/chain. */ if (nib == 0 || nib == 3) { ; - } else if (sg->Address) { + } else if (mptctl_test_address(sg)) { dma_addr_t dma_addr; void *kptr; int len; - dma_addr = le32_to_cpu(sg->Address); + leXX_to_cpu(dma_addr, sg->Address); kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); @@ -725,12 +1070,12 @@ } /* we're at eob! 
*/ - if (sg->Address) { + if (mptctl_test_address(sg)) { dma_addr_t dma_addr; void *kptr; int len; - dma_addr = le32_to_cpu(sg->Address); + leXX_to_cpu(dma_addr, sg->Address); kptr = bl->kptr; len = bl->len; pci_unmap_single(ioc->pcidev, dma_addr, len, dir); @@ -740,363 +1085,1657 @@ pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma); kfree(buflist); - dprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n)); + dctlprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n)); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_getiocinfo - Query the host adapter for IOC information. + * @arg: User space argument + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ static int -mpt_ioctl_rwperf_init(struct mpt_raw_r_w *dest, unsigned long src, - char *caller, MPT_ADAPTER **iocpp) +mptctl_getiocinfo (unsigned long arg) { - char *myname = "_rwperf_init()"; - int ioc; + struct mpt_ioctl_iocinfo *uarg = (struct mpt_ioctl_iocinfo *) arg; + struct mpt_ioctl_iocinfo karg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + struct Scsi_Host *sh; + MPT_SCSI_HOST *hd; + int iocnum; + int numDevices = 0; + unsigned int max_id; + int ii; + int port; + u8 revision; + + dctlprintk((": mptctl_getiocinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_iocinfo))) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Unable to read in mpt_ioctl_iocinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - /* get copy of structure passed from user space */ - if (copy_from_user(dest, (void*)src, sizeof(*dest))) { - printk(KERN_ERR MYNAM "::%s() @%d - Can't copy mpt_raw_r_w data @ %p\n", - myname, __LINE__, (void*)src); - return -EFAULT; /* (-14) Bad address */ - } else { - dprintk((KERN_INFO MYNAM "-perf: PerfInfo.{ioc,targ,qd,iters,nblks}" - ": %d %d %d %d %d\n", - dest->iocnum, dest->target, - 
(int)dest->qdepth, dest->iters, dest->nblks )); - dprintk((KERN_INFO MYNAM "-perf: PerfInfo.{cache,skip,range,rdwr,seqran}" - ": %d %d %d %d %d\n", - dest->cache_sz, dest->skip, dest->range, - dest->rdwr, dest->seqran )); - - /* Get the MPT adapter id. */ - if ((ioc = mpt_verify_adapter(dest->iocnum, iocpp)) < 0) { - printk(KERN_ERR MYNAM "::%s() @%d - ioc%d not found!\n", - myname, __LINE__, dest->iocnum); - return -ENXIO; /* (-6) No such device or address */ - } else { - dprintk((MYNAM "-perf: %s using mpt/ioc%x, target %02xh\n", - caller, dest->iocnum, dest->target)); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* Verify the data transfer size is correct. + * Ignore the port setting. + */ + if (karg.hdr.maxDataSize != sizeof(struct mpt_ioctl_iocinfo)) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Structure size mismatch. Command not completed.\n", + __FILE__, __LINE__); + return -EFAULT; + } + + /* Fill in the data and return the structure to the calling + * program + */ + if (ioc->chip_type == C1030) + karg.adapterType = MPT_IOCTL_INTERFACE_SCSI; + else + karg.adapterType = MPT_IOCTL_INTERFACE_FC; + + port = karg.hdr.port; + + karg.port = port; + pdev = (struct pci_dev *) ioc->pcidev; + + karg.pciId = pdev->device; + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); + karg.hwRev = revision; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + karg.subSystemDevice = pdev->subsystem_device; + karg.subSystemVendor = pdev->subsystem_vendor; +#endif + + /* Get number of devices + */ + if ( (sh = ioc->sh) != NULL) { + + /* sh->max_id = maximum target ID + 1 + */ + max_id = sh->max_id - 1; + hd = (MPT_SCSI_HOST *) sh->hostdata; + + /* Check all of the target structures and + * keep a counter. 
+ */ + if (hd && hd->Targets) { + for (ii = 0; ii <= max_id; ii++) { + if (hd->Targets[ii]) + numDevices++; + } + } + } + karg.numDevices = numDevices; + + /* Set the BIOS and FW Version + */ + karg.FWVersion = ioc->facts.FWVersion.Word; + karg.BIOSVersion = ioc->biosVersion; + + /* Set the Version Strings. + */ + strncpy (karg.driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); + + karg.busChangeEvent = 0; + karg.hostId = ioc->pfacts[port].PortSCSIID; + karg.rsvd[0] = karg.rsvd[1] = 0; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(struct mpt_ioctl_iocinfo))) { + printk(KERN_ERR "%s@%d::mptctl_getiocinfo - " + "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_gettargetinfo - Query the host adapter for target information. + * @arg: User space argument + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_gettargetinfo (unsigned long arg) +{ + struct mpt_ioctl_targetinfo *uarg = (struct mpt_ioctl_targetinfo *) arg; + struct mpt_ioctl_targetinfo karg; + MPT_ADAPTER *ioc; + struct Scsi_Host *sh; + MPT_SCSI_HOST *hd; + char *pmem; + int *pdata; + int iocnum; + int numDevices = 0; + unsigned int max_id; + int ii, jj, lun; + int maxWordsLeft; + int numBytes; + u8 port; + + dctlprintk(("mptctl_gettargetinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* Get the port number and set the maximum number of bytes + * in the returned structure. + * Ignore the port setting. + */ + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxWordsLeft = numBytes/sizeof(int); + port = karg.hdr.port; + + if (maxWordsLeft <= 0) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n", + __FILE__, __LINE__); + return -ENOMEM; + } + + /* Fill in the data and return the structure to the calling + * program + */ + + /* struct mpt_ioctl_targetinfo does not contain sufficient space + * for the target structures so when the IOCTL is called, there is + * not sufficient stack space for the structure. Allocate memory, + * populate the memory, copy back to the user, then free memory. 
+ * targetInfo format: + * bits 31-24: reserved + * 23-16: LUN + * 15- 8: Bus Number + * 7- 0: Target ID + */ + pmem = kmalloc(numBytes, GFP_KERNEL); + if (pmem == NULL) { + printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n", + __FILE__, __LINE__); + return -ENOMEM; + } + memset(pmem, 0, numBytes); + pdata = (int *) pmem; + + /* Get number of devices + */ + if ( (sh = ioc->sh) != NULL) { + + max_id = sh->max_id - 1; + hd = (MPT_SCSI_HOST *) sh->hostdata; + + /* Check all of the target structures. + * Save the Id and increment the counter, + * if ptr non-null. + * sh->max_id = maximum target ID + 1 + */ + if (hd && hd->Targets) { + ii = 0; + while (ii <= max_id) { + if (hd->Targets[ii]) { + for (jj = 0; jj <= MPT_LAST_LUN; jj++) { + lun = (1 << jj); + if (hd->Targets[ii]->luns & lun) { + numDevices++; + *pdata = (jj << 16) | ii; + --maxWordsLeft; + + pdata++; + + if (maxWordsLeft <= 0) { + break; + } + } + } + } + ii++; + } } } + karg.numDevices = numDevices; + + /* Copy part of the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(struct mpt_ioctl_targetinfo))) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + kfree(pmem); + return -EFAULT; + } + + /* Copy the remaining data from kernel memory to user memory + */ + if (copy_to_user((char *) uarg->targetInfo, pmem, numBytes)) { + printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - " + "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", + __FILE__, __LINE__, (void*)pdata); + kfree(pmem); + return -EFAULT; + } + + kfree(pmem); - return ioc; + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL Test function. + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_readtest (unsigned long arg) +{ + struct mpt_ioctl_test *uarg = (struct mpt_ioctl_test *) arg; + struct mpt_ioctl_test karg; + MPT_ADAPTER *ioc; + int iocnum; + + dctlprintk(("mptctl_readtest called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) { + printk(KERN_ERR "%s@%d::mptctl_readtest - " + "Unable to read in mpt_ioctl_test struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_readtest() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } -/* Treat first N blocks of disk as sacred! */ -#define SACRED_BLOCKS 100 - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -static int -mpt_ioctl_rwperf(unsigned long arg) -{ - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! */ - u8 target, targetM; - u8 lun, lunM; - u8 scsiop; - int qdepth; - int iters; - int cache_sz; - u32 xferbytes; - u32 scsidir; - u32 qtag; - u32 scsictl; - u32 sgdir; - u32 blkno; - u32 sbphys; - SGESimple32_t *sgl; - dma_addr_t sgl_dma; - struct buflist *buflist; - SGESimple32_t *sgOut, *sgIn; - int numfrags; - u32 *msg; - int i; - int ioc; - MPT_FRAME_HDR *mf; - MPT_ADAPTER *iocp; - int sgfragcpycnt; - int blklo, blkhi; - u8 nextchainoffset; - u8 *SenseBuf; - dma_addr_t SenseBufDMA; - char *myname = "_rwperf()"; - - dprintk((KERN_INFO "%s - starting...\n", myname)); - - /* Validate target device */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; - - /* Allocate DMA'able memory for the sense buffer. 
*/ - SenseBuf = pci_alloc_consistent(iocp->pcidev, 256, &SenseBufDMA); - - /* set perf parameters from input */ - target = kPerfInfo.target & 0x0FF; - targetM = target & myMAX_T_MASK; - lun = kPerfInfo.lun & 0x1F; // LUN=31 max - lunM = lun & myMAX_L_MASK; - qdepth = kPerfInfo.qdepth; - iters = kPerfInfo.iters; - xferbytes = ((u32)kPerfInfo.nblks)<<9; - - DevInUse[targetM][lunM] = 1; - DevIosCount[targetM][lunM] = 0; - - cache_sz = kPerfInfo.cache_sz * 1024; // CacheSz in kB! - - /* ToDo: */ - /* get capacity (?) */ - - - // pre-build, one time, everything we can for speed in the loops below... - - scsiop = 0x28; // default to SCSI READ! - scsidir = MPI_SCSIIO_CONTROL_READ; // DATA IN (host<--ioc<--dev) - // 02000000 - qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; // 00000000 - - if (xferbytes == 0) { - // Do 0-byte READ!!! - // IMPORTANT! Need to set no SCSI DIR for this! - scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; - } - - scsictl = scsidir | qtag; - - /* - * Set sgdir for DMA transfer. - */ -// sgdir = 0x04000000; // SCSI WRITE - sgdir = 0x00000000; // SCSI READ - - if ((sgl = kbuf_alloc_2_sgl(MAX(512,xferbytes), sgdir, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) - return -ENOMEM; - - sgfragcpycnt = MIN(10,numfrags); - nextchainoffset = 0; - if (numfrags > 10) - nextchainoffset = 0x1E; - - sbphys = SenseBufDMA; - - rwperf_reset = 0; - -// do { // target-loop - - blkno = SACRED_BLOCKS; // Treat first N blocks as sacred! - // FIXME! Skip option - blklo = blkno; - blkhi = blkno; - - do { // inner-loop - - while ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { - mb(); - schedule(); - barrier(); - } - msg = (u32*)mf; - - /* Start piecing the SCSIIORequest together */ - msg[0] = 0x00000000 | nextchainoffset<<16 | target; - msg[1] = 0x0000FF0A; // 255 sense bytes, 10-byte CDB! - msg[3] = lun << 8; - msg[4] = 0; - msg[5] = scsictl; - - // 16 bytes of CDB @ msg[6,7,8,9] are below... 
- - msg[6] = ( ((blkno & 0xFF000000) >> 8) - | ((blkno & 0x00FF0000) << 8) - | scsiop ); - msg[7] = ( (((u32)kPerfInfo.nblks & 0x0000FF00) << 16) - | ((blkno & 0x000000FF) << 8) - | ((blkno & 0x0000FF00) >> 8) ); - msg[8] = (kPerfInfo.nblks & 0x00FF); - msg[9] = 0; - - msg[10] = xferbytes; - -// msg[11] = 0xD0000100; -// msg[12] = sbphys; -// msg[13] = 0; - msg[11] = sbphys; - - // Copy the SGL... - if (xferbytes) { - sgOut = (SGESimple32_t*)&msg[12]; - sgIn = sgl; - for (i=0; i < sgfragcpycnt; i++) - *sgOut++ = *sgIn++; - } - - // fubar! QueueDepth issue!!! - while ( !rwperf_reset - && (DevIosCount[targetM][lunM] >= MIN(qdepth,64)) ) - { - mb(); - schedule(); - barrier(); - } - -// blkno += kPerfInfo.nblks; -// EXP Stuff! -// Try optimizing to certain cache size for the target! -// by keeping blkno within cache range if at all possible -#if 0 - if ( cache_sz - && ((2 * kPerfInfo.nblks) <= (cache_sz>>9)) - && ((blkno + kPerfInfo.nblks) > ((cache_sz>>9) + SACRED_BLOCKS)) ) - blkno = SACRED_BLOCKS; - else - blkno += kPerfInfo.nblks; + /* Fill in the data and return the structure to the calling + * program + */ + +#ifdef MFCNT + karg.chip_type = ioc->mfcnt; +#else + karg.chip_type = ioc->chip_type; #endif -// Ok, cheat! 
- if (cache_sz && ((blkno + kPerfInfo.nblks) > ((cache_sz>>9) + SACRED_BLOCKS)) ) - blkno = SACRED_BLOCKS; - else - blkno += kPerfInfo.nblks; + strncpy (karg.name, ioc->name, MPT_MAX_NAME); + strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH); - if (blkno > blkhi) - blkhi = blkno; + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, sizeof(struct mpt_ioctl_test))) { + printk(KERN_ERR "%s@%d::mptctl_readtest - " + "Unable to write out mpt_ioctl_test struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - DevIosCount[targetM][lunM]++; + return 0; +} - /* - * Finally, post the request - */ - mpt_put_msg_frame(mptctl_id, ioc, mf); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptctl_eventquery - Query the host adapter for the event types + * that are being logged. + * @arg: User space argument + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_eventquery (unsigned long arg) +{ + struct mpt_ioctl_eventquery *uarg = (struct mpt_ioctl_eventquery *) arg; + struct mpt_ioctl_eventquery karg; + MPT_ADAPTER *ioc; + int iocnum; + dctlprintk(("mptctl_eventquery called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { + printk(KERN_ERR "%s@%d::mptctl_eventquery - " + "Unable to read in mpt_ioctl_eventquery struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } - /* let linux breath! 
*/ - mb(); - schedule(); - barrier(); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventquery() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } - //dprintk((KERN_DEBUG MYNAM "-perf: inner-loop, cnt=%d\n", iters)); + karg.eventEntries = ioc->eventLogSize; + karg.eventTypes = ioc->eventTypes; - } while ((--iters > 0) && !rwperf_reset); + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { + printk(KERN_ERR "%s@%d::mptctl_eventquery - " + "Unable to write out mpt_ioctl_eventquery struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + return 0; +} - dprintk((KERN_INFO MYNAM "-perf: DbG: blklo=%d, blkhi=%d\n", blklo, blkhi)); - dprintk((KERN_INFO MYNAM "-perf: target-loop, thisTarget=%d\n", target)); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventenable (unsigned long arg) +{ + struct mpt_ioctl_eventenable *uarg = (struct mpt_ioctl_eventenable *) arg; + struct mpt_ioctl_eventenable karg; + MPT_ADAPTER *ioc; + int iocnum; -// // TEMPORARY! -// target = 0; + dctlprintk(("mptctl_eventenable called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { + printk(KERN_ERR "%s@%d::mptctl_eventenable - " + "Unable to read in mpt_ioctl_eventenable struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } -// } while (target); + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventenable() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + if (ioc->events == NULL) { + /* Have not yet allocated memory - do so now. 
+ */ + int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); + ioc->events = kmalloc(sz, GFP_KERNEL); + if (ioc->events == NULL) { + printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); + return -ENOMEM; + } + memset(ioc->events, 0, sz); + ioc->alloc_total += sz; - if (DevIosCount[targetM][lunM]) { - dprintk((KERN_INFO " DbG: DevIosCount[%d][%d]=%d\n", - targetM, lunM, DevIosCount[targetM][lunM])); - } + ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE; + ioc->eventContext = 0; + } - while (DevIosCount[targetM][lunM]) { - //dprintk((KERN_DEBUG " DbG: Waiting... DevIosCount[%d][%d]=%d\n", - // targetM, lunM, DevIosCount[targetM][lunM])); - mb(); - schedule(); - barrier(); - } - DevInUse[targetM][lunM] = 0; + /* Update the IOC event logging flag. + */ + ioc->eventTypes = karg.eventTypes; - pci_free_consistent(iocp->pcidev, 256, SenseBuf, SenseBufDMA); + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +mptctl_eventreport (unsigned long arg) +{ + struct mpt_ioctl_eventreport *uarg = (struct mpt_ioctl_eventreport *) arg; + struct mpt_ioctl_eventreport karg; + MPT_ADAPTER *ioc; + int iocnum; + int numBytes, maxEvents, max; + + dctlprintk(("mptctl_eventreport called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) { + printk(KERN_ERR "%s@%d::mptctl_eventreport - " + "Unable to read in mpt_ioctl_eventreport struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_eventreport() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); + maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS); - if (sgl) - kfree_sgl(sgl, sgl_dma, buflist, iocp); - dprintk((KERN_INFO " *** done ***\n")); + max = ioc->eventLogSize < maxEvents ? 
ioc->eventLogSize : maxEvents; - return 0; + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if ((max < 1) || !ioc->events) + return -ENODATA; + + /* Copy the data from kernel memory to user memory + */ + numBytes = max * sizeof(MPT_IOCTL_EVENTS); + if (copy_to_user((char *) uarg->eventData, ioc->events, numBytes)) { + printk(KERN_ERR "%s@%d::mptctl_eventreport - " + "Unable to write out mpt_ioctl_eventreport struct @ %p\n", + __FILE__, __LINE__, (void*)ioc->events); + return -EFAULT; + } + + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int -mpt_ioctl_rwperf_status(unsigned long arg) +mptctl_replace_fw (unsigned long arg) { - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! */ - MPT_ADAPTER *iocp; - int ioc; -// u8 targ; -// u8 lun; - int T, L; - char *myname = "_rwperf_status()"; + struct mpt_ioctl_replace_fw *uarg = (struct mpt_ioctl_replace_fw *) arg; + struct mpt_ioctl_replace_fw karg; + MPT_ADAPTER *ioc; + int iocnum; + u8 *mem = NULL; + dma_addr_t mem_dma; + int oldFwSize, newFwSize; + + dctlprintk(("mptctl_replace_fw called.\n")); + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) { + printk(KERN_ERR "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_replace_fw() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + /* If not caching FW, return 0 + */ + if ((ioc->FWImage == NULL) && (ioc->alt_ioc) && (ioc->alt_ioc->FWImage == NULL)) { + return 0; + } + + + /* Allocate memory for the new FW image + */ + newFwSize = karg.newImageSize; + mem = pci_alloc_consistent(ioc->pcidev, newFwSize, &mem_dma); + if (mem == NULL) + return -ENOMEM; + + ioc->alloc_total += newFwSize; - 
dprintk((KERN_INFO "%s - starting...\n", myname)); + /* Copy the data from user memory to kernel space + */ + if (copy_from_user(mem, uarg->newImage, newFwSize)) { + printk(KERN_ERR "%s@%d::mptctl_replace_fw - " + "Unable to read in mpt_ioctl_replace_fw image @ %p\n", + __FILE__, __LINE__, (void*)uarg); + pci_free_consistent(ioc->pcidev, newFwSize, mem, mem_dma); + ioc->alloc_total -= newFwSize; + return -EFAULT; + } + + /* Free the old FW image + */ + oldFwSize = ioc->facts.FWImageSize; + if (ioc->FWImage) { + pci_free_consistent(ioc->pcidev, oldFwSize, ioc->FWImage, ioc->FWImage_dma); + ioc->alloc_total -= oldFwSize; + ioc->FWImage = mem; + ioc->FWImage_dma = mem_dma; + + } else if ((ioc->alt_ioc) && (ioc->alt_ioc->FWImage)) { + pci_free_consistent(ioc->pcidev, oldFwSize, ioc->alt_ioc->FWImage, ioc->alt_ioc->FWImage_dma); + ioc->alloc_total -= oldFwSize; + ioc->alt_ioc->FWImage = mem; + ioc->alt_ioc->FWImage_dma = mem_dma; + } - /* Get a pointer to the MPT adapter. */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; + /* Update IOCFactsReply + */ + ioc->facts.FWImageSize = newFwSize; + if (ioc->alt_ioc) + ioc->alt_ioc->facts.FWImageSize = newFwSize; - /* set perf parameters from input */ -// targ = kPerfInfo.target & 0xFF; -// lun = kPerfInfo.lun & 0x1F; + return 0; +} - for (T=0; T < myMAX_TARGETS; T++) - for (L=0; L < myMAX_LUNS; L++) - if (DevIosCount[T][L]) { - printk(KERN_INFO "%s: ioc%d->00:%02x:%02x" - ", IosCnt=%d\n", - myname, ioc, T, L, DevIosCount[T][L] ); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* MPT IOCTL MPTCOMMAND function. + * Cast the arg into the mpt_ioctl_mpt_command structure. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. 
+ * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_mpt_command (unsigned long arg) +{ + struct mpt_ioctl_command *uarg = (struct mpt_ioctl_command *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *ioc; + int iocnum; + int rc; + + dctlprintk(("mptctl_command called.\n")); + + if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) { + printk(KERN_ERR "%s@%d::mptctl_mpt_command - " + "Unable to read in mpt_ioctl_command struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + rc = mptctl_do_mpt_command (karg, (char *) &uarg->MF, 0); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. 
+ * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_do_mpt_command (struct mpt_ioctl_command karg, char *mfPtr, int local) +{ + MPT_ADAPTER *ioc; + MPT_FRAME_HDR *mf = NULL; + MPIHeader_t *hdr; + MptSge_t *psge; + MptSge_t *this_sge = NULL; + MptSge_t *sglbuf = NULL; + struct buflist bufIn; /* data In buffer */ + struct buflist bufOut; /* data Out buffer */ + dma_addr_t sglbuf_dma; + dma_addr_t dma_addr; + int dir; /* PCI data direction */ + int sgSize = 0; /* Num SG elements */ + int this_alloc; + int iocnum, flagsLength; + int sz, rc = 0; + int msgContext; + u16 req_idx; + + dctlprintk(("mptctl_do_mpt_command called.\n")); + + if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + if (!ioc->ioctl) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "No memory available during driver init.\n", + __FILE__, __LINE__); + return -ENOMEM; + } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_TIMEOUT) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Busy with IOC Reset \n", __FILE__, __LINE__); + return -EBUSY; + } + + /* Verify that the final request frame will not be too large. + */ + sz = karg.dataSgeOffset * 4; + if (karg.dataInSize > 0) + sz += sizeof (MptSge_t); + if (karg.dataOutSize > 0) + sz += sizeof (MptSge_t); + + if ( sz > ioc->req_sz) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Request frame too large (%d) maximum (%d)\n", + __FILE__, __LINE__, sz, ioc->req_sz); + return -EFAULT; + } + + /* Get a free request frame and save the message context. 
+ */ + if ((mf = mpt_get_msg_frame(mptctl_id, ioc->id)) == NULL) + return -EAGAIN; + + hdr = (MPIHeader_t *) mf; + msgContext = le32_to_cpu(hdr->MsgContext); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + + /* Copy the request frame + * Reset the saved message context. + */ + if (local) { + /* Request frame in kernel space + */ + memcpy((char *)mf, (char *) mfPtr, karg.dataSgeOffset * 4); + } else { + /* Request frame in user space + */ + if (copy_from_user((char *)mf, (char *) mfPtr, + karg.dataSgeOffset * 4)){ + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to read MF from mpt_ioctl_command struct @ %p\n", + __FILE__, __LINE__, (void*)mfPtr); + rc = -EFAULT; + goto done_free_mem; + } + } + hdr->MsgContext = cpu_to_le32(msgContext); + + + /* Verify that this request is allowed. + */ + switch (hdr->Function) { + case MPI_FUNCTION_IOC_FACTS: + case MPI_FUNCTION_PORT_FACTS: + case MPI_FUNCTION_CONFIG: + case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND: + case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND: + case MPI_FUNCTION_FW_UPLOAD: + case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + case MPI_FUNCTION_FW_DOWNLOAD: + break; + + case MPI_FUNCTION_SCSI_IO_REQUEST: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + VirtDevice *pTarget = NULL; + MPT_SCSI_HOST *hd = NULL; + int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; + int scsidir = 0; + int target = (int) pScsiReq->TargetID; + int dataSize; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + if ( (hd = (MPT_SCSI_HOST *) ioc->sh->hostdata)) { + if (hd->Targets) + 
pTarget = hd->Targets[target]; } + if (pTarget &&(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) + qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). + */ + if (karg.dataOutSize > 0 ) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } + else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + } else { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + case MPI_FUNCTION_RAID_ACTION: + /* Just add a SGE + */ + break; + + case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + if (ioc->sh) { + SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; + int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; + int scsidir = MPI_SCSIIO_CONTROL_READ; + int dataSize; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + + /* verify that app has not requested + * more sense data than driver + * can provide, if so, reset this parameter + * set the sense buffer pointer low address + * update the control field to specify Q type + */ + if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->SenseBufferLowAddr = + cpu_to_le32(ioc->sense_buf_low_dma + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + /* All commands to physical devices are tagged + */ + + /* Have the IOCTL driver set the direction based + * on the dataOutSize (ordering issue with Sparc). 
+ */ + if (karg.dataOutSize > 0 ) { + scsidir = MPI_SCSIIO_CONTROL_WRITE; + dataSize = karg.dataOutSize; + } + else { + scsidir = MPI_SCSIIO_CONTROL_READ; + dataSize = karg.dataInSize; + } + + pScsiReq->Control = cpu_to_le32(scsidir | qtag); + pScsiReq->DataLength = cpu_to_le32(dataSize); + + } else { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "SCSI driver is not loaded. \n", + __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } + break; + + default: + /* + * MPI_FUNCTION_IOC_INIT + * MPI_FUNCTION_PORT_ENABLE + * MPI_FUNCTION_TARGET_CMD_BUFFER_POST + * MPI_FUNCTION_TARGET_ASSIST + * MPI_FUNCTION_TARGET_STATUS_SEND + * MPI_FUNCTION_TARGET_MODE_ABORT + * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET + * MPI_FUNCTION_IO_UNIT_RESET + * MPI_FUNCTION_HANDSHAKE + * MPI_FUNCTION_REPLY_FRAME_REMOVAL + * MPI_FUNCTION_EVENT_NOTIFICATION + * (driver handles event notification) + * MPI_FUNCTION_EVENT_ACK + * MPI_FUNCTION_SCSI_TASK_MGMT + */ + + /* What to do with these??? CHECK ME!!! + MPI_FUNCTION_FC_LINK_SRVC_BUF_POST + MPI_FUNCTION_FC_LINK_SRVC_RSP + MPI_FUNCTION_FC_ABORT + MPI_FUNCTION_FC_PRIMITIVE_SEND + MPI_FUNCTION_LAN_SEND + MPI_FUNCTION_LAN_RECEIVE + MPI_FUNCTION_LAN_RESET + */ + + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Illegal request (function 0x%x) \n", + __FILE__, __LINE__, hdr->Function); + rc = -EFAULT; + goto done_free_mem; + } + + /* Add the SGL ( at most one data in SGE and one data out SGE ) + * In the case of two SGE's - the data out (write) will always + * preceede the data in (read) SGE. psgList is used to free the + * allocated memory. + */ + psge = (MptSge_t *) ( ((int *) mf) + karg.dataSgeOffset); + flagsLength = 0; + + /* bufIn and bufOut are used for user to kernel space transfers + */ + bufIn.kptr = bufOut.kptr = NULL; + bufIn.len = bufOut.len = 0; + + if (karg.dataOutSize > 0 ) + sgSize ++; + + if (karg.dataInSize > 0 ) + sgSize ++; + + if (sgSize > 0) { + + /* Allocate memory for the SGL. 
+ * Used to free kernel memory once + * the MF is freed. + */ + sglbuf = pci_alloc_consistent (ioc->pcidev, + sgSize*sizeof(MptSge_t), &sglbuf_dma); + if (sglbuf == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } + this_sge = sglbuf; + + /* Set up the dataOut memory allocation */ + if (karg.dataOutSize > 0) { + dir = PCI_DMA_TODEVICE; + if (karg.dataInSize > 0 ) { + flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_DIRECTION | + MPT_SGE_ADDRESS_SIZE ) + << MPI_SGE_FLAGS_SHIFT; + } else { + flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; + } + flagsLength |= karg.dataOutSize; + + this_alloc = karg.dataOutSize; + bufOut.len = this_alloc; + bufOut.kptr = pci_alloc_consistent( + ioc->pcidev, this_alloc, &dma_addr); + + if (bufOut.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Copy user data to kernel space. + */ + if (copy_from_user(bufOut.kptr, + karg.dataOutBufPtr, + bufOut.len)) { + + printk(KERN_ERR + "%s@%d::mptctl_do_mpt_command - Unable " + "to read user data " + "struct @ %p\n", + __FILE__, __LINE__,(void*)karg.dataOutBufPtr); + rc = -EFAULT; + goto done_free_mem; + } + + /* Set up this SGE. 
+ * Copy to MF and to sglbuf + */ + + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX(dma_addr, psge->Address); + psge++; + + this_sge->FlagsLength=cpu_to_le32(flagsLength); + cpu_to_leXX(dma_addr, this_sge->Address); + this_sge++; + } + } + + if (karg.dataInSize > 0) { + dir = PCI_DMA_FROMDEVICE; + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + flagsLength |= karg.dataInSize; + + this_alloc = karg.dataInSize; + bufIn.len = this_alloc; + bufIn.kptr = pci_alloc_consistent(ioc->pcidev, + this_alloc, &dma_addr); + if (bufIn.kptr == NULL) { + rc = -ENOMEM; + goto done_free_mem; + } else { + /* Set up this SGE + * Copy to MF and to sglbuf + */ + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX(dma_addr, psge->Address); + + this_sge->FlagsLength=cpu_to_le32(flagsLength); + cpu_to_leXX(dma_addr, this_sge->Address); + this_sge++; + } + } + } else { + /* Add a NULL SGE + */ + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; + psge->FlagsLength = cpu_to_le32 (flagsLength); + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + } + + /* The request is complete. Set the timer parameters + * and issue the request. + */ + if (karg.timeout > 0) { + ioc->ioctl->timer.expires = jiffies + HZ*karg.timeout; + } else { + ioc->ioctl->timer.expires = jiffies + HZ*MPT_IOCTL_DEFAULT_TIMEOUT; + } + + ioc->ioctl->wait_done = 0; + ioc->ioctl->status |= MPT_IOCTL_STATUS_TIMER_ACTIVE; + add_timer(&ioc->ioctl->timer); + + mpt_put_msg_frame(mptctl_id, ioc->id, mf); + wait_event(mptctl_wait, ioc->ioctl->wait_done); + + /* The command is complete. * Return data to the user. + * + * If command completed, mf has been freed so cannot + * use this memory. + * + * If timeout, a recovery mechanism has been called. + * Need to free the mf. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_TIMEOUT) { + + /* A timeout - there is no data to return to the + * the user other than an error. + * The timer callback deleted the + * timer and reset the adapter queues. 
+ */ + printk(KERN_WARNING "%s@%d::mptctl_do_mpt_command - " + "Timeout Occurred on IOCTL! Resetting IOC.\n", __FILE__, __LINE__); + rc = -ETIME; + + /* Free memory and return to the calling function + */ + goto done_free_mem; + + } else { + /* Callback freed request frame. + */ + mf = NULL; + + /* If a valid reply frame, copy to the user. + * Offset 2: reply length in U32's + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { + if (karg.maxReplyBytes < ioc->reply_sz) { + sz = MIN(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); + } else { + sz = MIN(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); + } + + if (sz > 0) { + if (copy_to_user((char *)karg.replyFrameBufPtr, + &ioc->ioctl->ReplyFrame, sz)){ + + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write out reply frame %p\n", + __FILE__, __LINE__, (void*)karg.replyFrameBufPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If valid sense data, copy to user. + */ + if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { + sz = MIN(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); + if (sz > 0) { + if (copy_to_user((char *)karg.senseDataPtr, ioc->ioctl->sense, sz)) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write sense data to user %p\n", + __FILE__, __LINE__, + (void*)karg.senseDataPtr); + rc = -ENODATA; + goto done_free_mem; + } + } + } + + /* If the overall status is _GOOD and data in, copy data + * to user. + */ + if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && + (karg.dataInSize > 0) && (bufIn.kptr)) { + + if (copy_to_user((char *)karg.dataInBufPtr, + bufIn.kptr, karg.dataInSize)) { + printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " + "Unable to write data to user %p\n", + __FILE__, __LINE__, + (void*)karg.dataInBufPtr); + rc = -ENODATA; + } + } + } + +done_free_mem: + /* Clear status bits. + */ + ioc->ioctl->status = 0; + + if (sglbuf) { + this_sge = sglbuf; + + /* Free the allocated memory. 
+ */ + if (bufOut.kptr != NULL ) { + + leXX_to_cpu (dma_addr, this_sge->Address); + + this_sge++; /* go to next structure */ + this_alloc = bufOut.len; + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) &bufOut, dma_addr); + } + + if (bufIn.kptr != NULL ) { + leXX_to_cpu (dma_addr, this_sge->Address); + this_alloc = bufIn.len; + + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) &bufIn, dma_addr); + } + + this_alloc = sgSize * sizeof(MptSge_t); + pci_free_consistent(ioc->pcidev, + this_alloc, (void *) sglbuf, sglbuf_dma); + + } + + /* mf will be null if allocation failed OR + * if command completed OK (callback freed) + */ + if (mf) + mpt_free_msg_frame(mptctl_id, ioc->id, mf); + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Routine for the Compaq IOCTL commands. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + * -ENOMEM if memory allocation error + */ +static int +mptctl_compaq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int iocnum = 0; + unsigned iocnumX = 0; + int ret; + int nonblock = (file->f_flags & O_NONBLOCK); + MPT_ADAPTER *iocp = NULL; + + if (cmd == CPQFCTS_SCSI_PASSTHRU) { + /* Update the iocnum */ + if (copy_from_user(&iocnumX, (int *)arg, sizeof(int))) { + printk(KERN_ERR "%s::mptctl_compaq_ioctl() @%d - " + "Unable to read controller number @ %p\n", + __FILE__, __LINE__, (void*)arg); + return -EFAULT; + } + iocnumX &= 0xFF; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_ERR "%s::mptctl_compaq_ioctl() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnumX); + return -ENODEV; + } + + /* All of these commands require an interrupt or + * are unknown/illegal. 
+ */ + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + dctlprintk((MYIOC_s_INFO_FMT ": mptctl_compaq_ioctl()\n", iocp->name)); + + switch(cmd) { + case CPQFCTS_GETPCIINFO: + ret = mptctl_cpq_getpciinfo(arg); + break; + case CPQFCTS_GETDRIVER: + ret = mptctl_cpq_getdriver(arg); + break; + case CPQFCTS_CTLR_STATUS: + ret = mptctl_cpq_ctlr_status(arg); + break; + case CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS: + ret = mptctl_cpq_target_address(arg); + break; + case CPQFCTS_SCSI_PASSTHRU: + ret = mptctl_cpq_passthru(arg); + break; + default: + ret = -EINVAL; + } + + up(&mptctl_syscall_sem_ioc[iocp->id]); + + return ret; + +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_getpciinfo - Get PCI Information in format desired by Compaq + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_cpq_getpciinfo(unsigned long arg) +{ + cpqfc_pci_info_struct *uarg = (cpqfc_pci_info_struct *) arg; + cpqfc_pci_info_struct karg; + MPT_ADAPTER *ioc; + struct pci_dev *pdev; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + int iocnum = 0, iocnumX = 0; + dma_addr_t buf_dma; + u8 *pbuf = NULL; + int failed; + + dctlprintk((": mptctl_cpq_pciinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(cpqfc_pci_info_struct))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_pciinfo - " + "Unable to read in cpqfc_pci_info_struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_pciinfo() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + pdev = (struct pci_dev *) ioc->pcidev; + + /* Populate the structure. 
*/ + karg.bus = pdev->bus->number; + karg.bus_type = 1; /* 1 = PCI; 4 = unknown */ + karg.device_fn = PCI_FUNC(pdev->devfn); + karg.slot_number = PCI_SLOT(pdev->devfn); + karg.vendor_id = pdev->vendor; + karg.device_id = pdev->device; + karg.board_id = (karg.device_id | (karg.vendor_id << 16)); + karg.class_code = pdev->class; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) + karg.sub_vendor_id = pdev->subsystem_vendor; + karg.sub_device_id = pdev->subsystem_device; +#endif + + /* Issue a config request to get the device serial number + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; /* read */ + cfg.timeout = 10; + + failed = 1; + + if (mpt_config(ioc, &cfg) == 0) { + if (cfg.hdr->PageLength > 0) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) == 0) { + ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; + strncpy(karg.serial_number, pdata->BoardTracerNumber, 17); + failed = 0; + } + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; + } + } + } + if (failed) + strncpy(karg.serial_number, " ", 17); + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(cpqfc_pci_info_struct))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_pciinfo - " + "Unable to write out cpqfc_pci_info_struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_getdriver - Get Driver Version in format desired by Compaq + * + * Outputs: None. 
+ * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ +static int +mptctl_cpq_getdriver(unsigned long arg) +{ + int *uarg = (int *)arg; + int karg; + MPT_ADAPTER *ioc = NULL; + int iocnum = 0, iocnumX = 0; + int ii, jj; + char version[10]; + char val; + char *vptr = NULL; + char *pptr = NULL; + + dctlprintk((": mptctl_cpq_getdriver called.\n")); + if (copy_from_user(&karg, uarg, sizeof(int))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_getdriver - " + "Unable to read in struct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_getdriver() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + strncpy(version, MPT_LINUX_VERSION_COMMON, 8); + + karg = 0; + vptr = version; + ii = 3; + while (ii > 0) { + pptr = strchr(vptr, '.'); + if (pptr) { + *pptr = '\0'; + val = 0; + for (jj=0; vptr[jj]>='0' && vptr[jj]<='9'; jj++) + val = 10 * val + (vptr[jj] - '0'); + karg |= (val << (8*ii)); + pptr++; + vptr = pptr; + } else + break; + ii--; + } + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(int))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_getdriver - " + "Unable to write out stuct @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_ctlr_status - Get controller status in format desired by Compaq + * + * Outputs: None. + * Return: 0 if successful + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + */ static int -mpt_ioctl_rwperf_reset(unsigned long arg) +mptctl_cpq_ctlr_status(unsigned long arg) { - struct mpt_raw_r_w kPerfInfo; - /* NOTE: local copy, on stack==KERNEL_SPACE! 
*/ - MPT_ADAPTER *iocp; - int ioc; -// u8 targ; -// u8 lun; - int T, L; - int i; - char *myname = "_rwperf_reset()"; - - dprintk((KERN_INFO "%s - starting...\n", myname)); - - /* Get MPT adapter id. */ - if ((ioc = mpt_ioctl_rwperf_init(&kPerfInfo, arg, myname, &iocp)) < 0) - return ioc; - - /* set perf parameters from input */ -// targ = kPerfInfo.target & 0xFF; -// lun = kPerfInfo.lun & 0x1F; - - rwperf_reset = 1; - for (i=0; i < 1000000; i++) { - mb(); - schedule(); - barrier(); - } - rwperf_reset = 0; - - for (T=0; T < myMAX_TARGETS; T++) - for (L=0; L < myMAX_LUNS; L++) - if (DevIosCount[T][L]) { - printk(KERN_INFO "%s: ioc%d->00:%02x:%02x, " - "IosCnt RESET! (from %d to 0)\n", - myname, ioc, T, L, DevIosCount[T][L] ); - DevIosCount[T][L] = 0; - DevInUse[T][L] = 0; + cpqfc_ctlr_status *uarg = (cpqfc_ctlr_status *) arg; + cpqfc_ctlr_status karg; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + + dctlprintk((": mptctl_cpq_pciinfo called.\n")); + if (copy_from_user(&karg, uarg, sizeof(cpqfc_ctlr_status))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_ctlr_status - " + "Unable to read in cpqfc_ctlr_status @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_ctlr_status() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + karg.status = ioc->last_state; + karg.offline_reason = 0; + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(cpqfc_ctlr_status))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_ctlr_status - " + "Unable to write out cpqfc_ctlr_status @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_target_address - Get WWN Information in format desired by Compaq + * + * Outputs: None. 
+ * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_cpq_target_address(unsigned long arg) +{ + Scsi_FCTargAddress *uarg = (Scsi_FCTargAddress *) arg; + Scsi_FCTargAddress karg; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; + dma_addr_t buf_dma; + u8 *pbuf = NULL; + FCPortPage0_t *ppp0; + int ii, failed; + u32 low, high; + + dctlprintk((": mptctl_cpq_target_address called.\n")); + if (copy_from_user(&karg, uarg, sizeof(Scsi_FCTargAddress))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_target_address - " + "Unable to read in Scsi_FCTargAddress @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_target_address() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + karg.host_port_id = 0; + + /* Issue a config request to get the device wwn + */ + hdr.PageVersion = 0; + hdr.PageLength = 0; + hdr.PageNumber = 0; + hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; + cfg.hdr = &hdr; + cfg.physAddr = -1; + cfg.pageAddr = 0; + cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; + cfg.dir = 0; /* read */ + cfg.timeout = 10; + + failed = 1; + + if (mpt_config(ioc, &cfg) == 0) { + if (cfg.hdr->PageLength > 0) { + /* Issue the second config page request */ + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + + pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); + if (pbuf) { + cfg.physAddr = buf_dma; + if (mpt_config(ioc, &cfg) == 0) { + ppp0 = (FCPortPage0_t *) pbuf; + + low = le32_to_cpu(ppp0->WWNN.Low); + high = le32_to_cpu(ppp0->WWNN.High); + + for (ii = 0; ii < 4; ii++) { + karg.host_wwn[7-ii] = low & 0xFF; + karg.host_wwn[3-ii] = high & 0xFF; + low = (low >> 8); + high = (high >> 8); + } + failed = 0; 
+ } + pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); + pbuf = NULL; } + } + } + + if (failed) { + for (ii = 7; ii >= 0; ii--) + karg.host_wwn[ii] = 0; + } + + /* Copy the data from kernel memory to user memory + */ + if (copy_to_user((char *)arg, &karg, + sizeof(Scsi_FCTargAddress))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_target_address - " + "Unable to write out Scsi_FCTargAddress @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_cpq_passthru - Construct and issue a SCSI IO Passthru + * + * Requires the SCSI host driver to be loaded. + * I386 version. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ static int -mpt_ioctl_scsi_cmd(unsigned long arg) +mptctl_cpq_passthru(unsigned long arg) { - return -ENOSYS; + VENDOR_IOCTL_REQ *uarg = (VENDOR_IOCTL_REQ *) arg; + VENDOR_IOCTL_REQ karg; + cpqfc_passthru_t kpass; + MPT_ADAPTER *ioc; + int iocnum = 0, iocnumX = 0; + int rc; + + dctlprintk((": mptctl_cpq_passthru called.\n")); + if (copy_from_user(&karg, uarg, sizeof(VENDOR_IOCTL_REQ))) { + printk(KERN_ERR "%s@%d::mptctl_cpq_passthru - " + "Unable to read in VENDOR_IOCTL_REQ @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + /* Set the IOC number */ + iocnumX = karg.lc & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR "%s::mptctl_cpq_passthru() @%d - ioc%d not found!\n", + __FILE__, __LINE__, iocnum); + return -ENODEV; + } + + if (ioc->sh == NULL) { + printk(KERN_ERR "%s::mptctl_cpq_passthru() @%d - SCSI Host driver not loaded!\n", + __FILE__, __LINE__); + return -EFAULT; + } + + /* Read in the second buffer */ + if (copy_from_user(&kpass, uarg->argp, sizeof(cpqfc_passthru_t))) { + 
printk(KERN_ERR "%s@%d::mptctl_cpq_passthru - " + "Unable to read in cpqfc_passthru_t @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + + /* Generate the SCSI IO command and issue */ + rc = mptctl_compaq_scsiio(&karg, &kpass); + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptctl_compaq_scsiio - Reformat Compaq structures into driver structures + * Call the generic _do_mpt_command function. + * + * Requires the SCSI host driver to be loaded. + * I386 version. + * + * Outputs: None. + * Return: 0 if successful + * -EBUSY if previous command timout and IOC reset is not complete. + * -EFAULT if data unavailable + * -ENODEV if no such device/adapter + * -ETIME if timer expires + */ +static int +mptctl_compaq_scsiio(VENDOR_IOCTL_REQ *pVenReq, cpqfc_passthru_t *pPass) +{ + struct mpt_ioctl_command karg; + SCSIIORequest_t request ; + SCSIIORequest_t *pMf; + int ii, rc; + u8 opcode; + + /* Fill in parameters to karg */ + karg.hdr.iocnum = pVenReq->lc; + karg.hdr.port = 0; + karg.hdr.maxDataSize = 0; /* not used */ + karg.timeout = 0; /* use default */ + + karg.replyFrameBufPtr = NULL; /* no reply data */ + karg.maxReplyBytes = 0; + + karg.senseDataPtr = pPass->sense_data; + karg.maxSenseBytes = pPass->sense_len; /* max is 40 */ + + if (pPass->rw_flag == MPT_COMPAQ_WRITE) { + karg.dataOutBufPtr = pPass->bufp; + karg.dataOutSize = pPass->len; + karg.dataInBufPtr = NULL; + karg.dataInSize = 0; + } else { + karg.dataInBufPtr = pPass->bufp; + karg.dataInSize = pPass->len; + karg.dataOutBufPtr = NULL; + karg.dataOutSize = 0; + } + + karg.dataSgeOffset = (sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION))/4; + + /* Construct the Message frame */ + pMf = &request; + + pMf->TargetID = (u8) pVenReq->ld; /* ???? 
FIXME */ + pMf->Bus = (u8) pPass->bus; + pMf->ChainOffset = 0; + pMf->Function = MPI_FUNCTION_SCSI_IO_REQUEST; + + /* May need some tweaking here */ + opcode = (u8) pPass->cdb[0]; + if (opcode < 0x20) + pMf->CDBLength = 6; + else if (opcode < 0x60) + pMf->CDBLength = 10; + else if ((opcode < 0xC0) && (opcode >= 0xA0)) + pMf->CDBLength = 12; + else + pMf->CDBLength = 16; + + pMf->SenseBufferLength = karg.maxSenseBytes; /* max is 40 */ + pMf->Reserved = 0; + pMf->MsgFlags = 0; /* set later */ + pMf->MsgContext = 0; /* set later */ + + for (ii = 0; ii < 8; ii++) + pMf->LUN[ii] = 0; + pMf->LUN[1] = 0; /* ???? FIXME */ + + /* Tag values set by _do_mpt_command */ + if (pPass->rw_flag == MPT_COMPAQ_WRITE) + pMf->Control = MPI_SCSIIO_CONTROL_WRITE; + else + pMf->Control = MPI_SCSIIO_CONTROL_READ; + + for (ii = 0; ii < 16; ii++) + pMf->CDB[ii] = pPass->cdb[ii]; + + pMf->DataLength = pPass->len; + + /* All remaining fields are set by the next function + */ + rc = mptctl_do_mpt_command (karg, (char *)pMf, 1); + return rc; } + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,51) @@ -1110,7 +2749,7 @@ llseek: no_llseek, read: mptctl_read, write: mptctl_write, - ioctl: mpt_ioctl, + ioctl: mptctl_ioctl, open: mptctl_open, release: mptctl_release, }; @@ -1133,18 +2772,15 @@ unsigned long, struct file *)); int unregister_ioctl32_conversion(unsigned int cmd); - -struct mpt_fw_xfer32 { - unsigned int iocnum; - unsigned int fwlen; - u32 bufp; -}; - -#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32) - extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* sparc32_XXX functions are used to provide a conversion between + * pointers and u32's. If the arg does not contain any pointers, then + * a specialized function (sparc32_XXX) is not needed. 
If the arg + * does contain pointer(s), then the specialized function is used + * to ensure the structure contents is properly processed by mptctl. + */ static int sparc32_mptfwxfer_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *filp) @@ -1156,7 +2792,7 @@ int nonblock = (filp->f_flags & O_NONBLOCK); int ret; - dprintk((KERN_INFO MYNAM "::sparc32_mptfwxfer_ioctl() called\n")); + dctlprintk((KERN_INFO MYNAM "::sparc32_mptfwxfer_ioctl() called\n")); if (copy_from_user(&kfw32, (char *)arg, sizeof(kfw32))) return -EFAULT; @@ -1177,13 +2813,131 @@ kfw.fwlen = kfw32.fwlen; kfw.bufp = (void *)(unsigned long)kfw32.bufp; - ret = mpt_ioctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); + + up(&mptctl_syscall_sem_ioc[iocp->id]); + + return ret; +} + +static int +sparc32_mpt_command(unsigned int fd, unsigned int cmd, + unsigned long arg, struct file *filp) +{ + struct mpt_ioctl_command32 karg32; + struct mpt_ioctl_command32 *uarg = (struct mpt_ioctl_command32 *) arg; + struct mpt_ioctl_command karg; + MPT_ADAPTER *iocp = NULL; + int iocnum, iocnumX; + int nonblock = (filp->f_flags & O_NONBLOCK); + int ret; + + dctlprintk((KERN_INFO MYNAM "::sparc32_mpt_command() called\n")); + + if (copy_from_user(&karg32, (char *)arg, sizeof(karg32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = karg32.hdr.iocnum & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || + (iocp == NULL)) { + printk(KERN_ERR MYNAM "::sparc32_mpt_command @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) + return ret; + + /* Copy data to karg */ + karg.hdr.iocnum = karg32.hdr.iocnum; + karg.hdr.port = karg32.hdr.port; + karg.timeout = karg32.timeout; + karg.maxReplyBytes = karg32.maxReplyBytes; + + karg.dataInSize = karg32.dataInSize; + karg.dataOutSize = karg32.dataOutSize; + karg.maxSenseBytes = 
karg32.maxSenseBytes; + karg.dataSgeOffset = karg32.dataSgeOffset; + + karg.replyFrameBufPtr = (char *)(unsigned long)karg32.replyFrameBufPtr; + karg.dataInBufPtr = (char *)(unsigned long)karg32.dataInBufPtr; + karg.dataOutBufPtr = (char *)(unsigned long)karg32.dataOutBufPtr; + karg.senseDataPtr = (char *)(unsigned long)karg32.senseDataPtr; + + /* Pass new structure to do_mpt_command + */ + ret = mptctl_do_mpt_command (karg, (char *) &uarg->MF, 0); up(&mptctl_syscall_sem_ioc[iocp->id]); return ret; } +static int +sparc32_mptctl_cpq_passthru(unsigned int fd, unsigned int cmd, + unsigned long arg, struct file *filp) +{ + VENDOR_IOCTL_REQ32 *uarg = (VENDOR_IOCTL_REQ32 *) arg; + VENDOR_IOCTL_REQ32 karg32; + VENDOR_IOCTL_REQ karg; + cpqfc_passthru32_t kpass32; + cpqfc_passthru_t kpass; + MPT_ADAPTER *ioc; + int nonblock = (filp->f_flags & O_NONBLOCK); + int iocnum = 0, iocnumX = 0; + int rc; + int ii; + + dctlprintk((KERN_INFO MYNAM "::sparc32_mptctl_cpq_passthru() called\n")); + + if (copy_from_user(&karg32, (char *)arg, sizeof(karg32))) + return -EFAULT; + + /* Verify intended MPT adapter */ + iocnumX = karg32.lc & 0xFF; + if (((iocnum = mpt_verify_adapter(iocnumX, &ioc)) < 0) || + (ioc == NULL)) { + printk(KERN_ERR MYNAM "::sparc32_mpt_command @%d - ioc%d not found!\n", + __LINE__, iocnumX); + return -ENODEV; + } + + if ((rc = mptctl_syscall_down(ioc, nonblock)) != 0) + return rc; + + /* Copy data to karg */ + karg.ld = karg32.ld; + karg.node = karg32.node; + karg.lc = karg32.lc; + karg.nexus = karg32.nexus; + karg.argp = (void *)(unsigned long)karg32.argp; + + /* Read in the second buffer */ + if (copy_from_user(&kpass32, karg.argp, sizeof(cpqfc_passthru32_t))) { + printk(KERN_ERR "%s@%d::sparc32_mptctl_cpq_passthru - " + "Unable to read in cpqfc_passthru_t @ %p\n", + __FILE__, __LINE__, (void*)uarg); + return -EFAULT; + } + + /* Copy the 32bit buffer to kpass */ + for (ii = 0; ii < 16; ii++) + kpass.cdb[ii] = kpass32.cdb[ii]; + kpass.bus = kpass32.bus; + 
kpass.pdrive = kpass32.pdrive; + kpass.len = kpass32.len; + kpass.sense_len = kpass32.sense_len; + kpass.bufp = (void *)(unsigned long)kpass32.bufp; + kpass.rw_flag = kpass32.rw_flag; + + /* Generate the SCSI IO command and issue */ + rc = mptctl_compaq_scsiio(&karg, &kpass); + + up(&mptctl_syscall_sem_ioc[ioc->id]); + return rc; +} + #endif /*} linux >= 2.3.x */ #endif /*} sparc */ @@ -1193,26 +2947,76 @@ int err; int i; int where = 1; + int sz; + u8 *mem; + MPT_ADAPTER *ioc = NULL; + int iocnum; show_mptmod_ver(my_NAME, my_VERSION); for (i=0; iioctl = (MPT_IOCTL *) mem; + ioc->ioctl->ioc = ioc; + init_timer (&ioc->ioctl->timer); + ioc->ioctl->timer.data = (unsigned long) ioc->ioctl; + ioc->ioctl->timer.function = mptctl_timer_expired; + } } #if defined(__sparc__) && defined(__sparc_v9__) /*{*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ - err = register_ioctl32_conversion(MPTRWPERF, NULL); + err = register_ioctl32_conversion(MPTIOCINFO, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTTARGETINFO, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTTEST, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTQUERY, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTENABLE, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTEVENTREPORT, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTHARDRESET, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTCOMMAND32, sparc32_mpt_command); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(MPTFWDOWNLOAD32, + sparc32_mptfwxfer_ioctl); if (++where && err) goto out_fail; - err = register_ioctl32_conversion(MPTRWPERF_CHK, NULL); + err = register_ioctl32_conversion(CPQFCTS_GETPCIINFO, NULL); if (++where && err) goto out_fail; - err = 
register_ioctl32_conversion(MPTRWPERF_RESET, NULL); + err = register_ioctl32_conversion(CPQFCTS_CTLR_STATUS, NULL); if (++where && err) goto out_fail; - err = register_ioctl32_conversion(MPTFWDOWNLOAD32, sparc32_mptfwxfer_ioctl); + err = register_ioctl32_conversion(CPQFCTS_GETDRIVER, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS, NULL); + if (++where && err) goto out_fail; + err = register_ioctl32_conversion(CPQFCTS_SCSI_PASSTHRU32, sparc32_mptctl_cpq_passthru); if (++where && err) goto out_fail; #endif /*} linux >= 2.3.x */ #endif /*} sparc */ + /* Register this device */ if (misc_register(&mptctl_miscdev) == -1) { printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR); err = -EBUSY; @@ -1226,13 +3030,19 @@ * Install our handler */ ++where; - if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) <= 0) { + if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) < 0) { printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n"); misc_deregister(&mptctl_miscdev); err = -EBUSY; goto out_fail; } + if (mpt_reset_register(mptctl_id, mptctl_ioc_reset) == 0) { + dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); + } else { + /* FIXME! */ + } + return 0; out_fail: @@ -1241,35 +3051,72 @@ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ printk(KERN_ERR MYNAM ": ERROR: Failed to register ioctl32_conversion!" 
" (%d:err=%d)\n", where, err); - unregister_ioctl32_conversion(MPTRWPERF); - unregister_ioctl32_conversion(MPTRWPERF_CHK); - unregister_ioctl32_conversion(MPTRWPERF_RESET); + unregister_ioctl32_conversion(MPTIOCINFO); + unregister_ioctl32_conversion(MPTTARGETINFO); + unregister_ioctl32_conversion(MPTTEST); + unregister_ioctl32_conversion(MPTEVENTQUERY); + unregister_ioctl32_conversion(MPTEVENTENABLE); + unregister_ioctl32_conversion(MPTEVENTREPORT); + unregister_ioctl32_conversion(MPTHARDRESET); + unregister_ioctl32_conversion(MPTCOMMAND32); unregister_ioctl32_conversion(MPTFWDOWNLOAD32); + unregister_ioctl32_conversion(CPQFCTS_GETPCIINFO); + unregister_ioctl32_conversion(CPQFCTS_GETDRIVER); + unregister_ioctl32_conversion(CPQFCTS_CTLR_STATUS); + unregister_ioctl32_conversion(CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS); + unregister_ioctl32_conversion(CPQFCTS_SCSI_PASSTHRU32); #endif /*} linux >= 2.3.x */ #endif /*} sparc */ + for (i=0; iioctl) { + kfree ( ioc->ioctl ); + ioc->ioctl = NULL; + } + } + } return err; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ void mptctl_exit(void) { - -#if defined(__sparc__) && defined(__sparc_v9__) /*{*/ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) /*{*/ - unregister_ioctl32_conversion(MPTRWPERF); - unregister_ioctl32_conversion(MPTRWPERF_CHK); - unregister_ioctl32_conversion(MPTRWPERF_RESET); - unregister_ioctl32_conversion(MPTFWDOWNLOAD32); -#endif /*} linux >= 2.3.x */ -#endif /*} sparc */ + int i; + MPT_ADAPTER *ioc; + int iocnum; misc_deregister(&mptctl_miscdev); - printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n", + printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n", mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); - printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n"); + /* De-register reset handler from base module */ + mpt_reset_deregister(mptctl_id); + dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n")); + + /* 
De-register callback handler from base module */ mpt_deregister(mptctl_id); + printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n"); + + /* Free allocated memory */ + for (i=0; iioctl) { + kfree ( ioc->ioctl ); + ioc->ioctl = NULL; + } + } + } } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/message/fusion/mptctl.h Thu May 30 21:28:59 2002 @@ -0,0 +1,395 @@ +/* + * linux/drivers/message/fusion/mptioctl.h + * Fusion MPT misc device (ioctl) driver. + * For use with PCI chip/adapter(s): + * LSIFC9xx/LSI409xx Fibre Channel + * running LSI Logic Fusion MPT (Message Passing Technology) firmware. + * + * Credits: + * This driver would not exist if not for Alan Cox's development + * of the linux i2o driver. + * + * A huge debt of gratitude is owed to David S. Miller (DaveM) + * for fixing much of the stupid and broken stuff in the early + * driver while porting to sparc64 platform. THANK YOU! + * + * (see also mptbase.c) + * + * Copyright (c) 1999-2002 LSI Logic Corporation + * Originally By: Steven J. Ralston + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) + * + * $Id: mptctl.h,v 1.2 2002/03/19 23:05:36 awilliam Exp $ + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef MPTCTL_H_INCLUDED +#define MPTCTL_H_INCLUDED +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "linux/version.h" + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * + */ +#define MPT_MISCDEV_BASENAME "mptctl" +#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME + +#define MPT_PRODUCT_LENGTH 12 + +/* + * Generic MPT Control IOCTLs and structures + */ +#define MPT_MAGIC_NUMBER 'm' + +#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w) + +#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct 
mpt_fw_xfer) +#define MPTCOMMAND _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command) + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32) +#define MPTCOMMAND32 _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32) +#endif /*}*/ + +#define MPTIOCINFO _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo) +#define MPTTARGETINFO _IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo) +#define MPTTEST _IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test) +#define MPTEVENTQUERY _IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery) +#define MPTEVENTENABLE _IOWR(MPT_MAGIC_NUMBER,22,struct mpt_ioctl_eventenable) +#define MPTEVENTREPORT _IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport) +#define MPTHARDRESET _IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset) +#define MPTFWREPLACE _IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw) + +/* + * SPARC PLATFORM REMARK: + * IOCTL data structures that contain pointers + * will have different sizes in the driver and applications + * (as the app. will not use 8-byte pointers). + * Apps should use MPTFWDOWNLOAD and MPTCOMMAND. + * The driver will convert data from + * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command) + * internally. + */ +struct mpt_fw_xfer { + unsigned int iocnum; /* IOC unit number */ + unsigned int fwlen; + void *bufp; /* Pointer to firmware buffer */ +}; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +struct mpt_fw_xfer32 { + unsigned int iocnum; + unsigned int fwlen; + u32 bufp; +}; +#endif /*}*/ + + +/* + * IOCTL header structure. + * iocnum - must be defined. + * port - must be defined for all IOCTL commands other than MPTIOCINFO + * maxDataSize - ignored on MPTCOMMAND commands + * - ignored on MPTFWREPLACE commands + * - on query commands, reports the maximum number of bytes to be returned + * to the host driver (count includes the header). 
+ * That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands. + * Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable + * sized commands. (MPTTARGETINFO, MPTEVENTREPORT) + */ +typedef struct _mpt_ioctl_header { + unsigned int iocnum; /* IOC unit number */ + unsigned int port; /* IOC port number */ + int maxDataSize; /* Maximum Num. bytes to transfer on read */ +} mpt_ioctl_header; + +/* + * Issue a diagnostic reset + */ +struct mpt_ioctl_diag_reset { + mpt_ioctl_header hdr; +}; + + +/* + * Adapter Information Page + * Read only. + * Data starts at offset 0xC + */ +#define MPT_IOCTL_INTERFACE_FC (0x01) +#define MPT_IOCTL_INTERFACE_SCSI (0x00) +#define MPT_IOCTL_VERSION_LENGTH (32) + +struct mpt_ioctl_iocinfo { + mpt_ioctl_header hdr; + int adapterType; /* SCSI or FCP */ + int port; /* port number */ + int pciId; /* PCI Id. */ + int hwRev; /* hardware revision */ + int subSystemDevice; /* PCI subsystem Device ID */ + int subSystemVendor; /* PCI subsystem Vendor ID */ + int numDevices; /* number of devices */ + int FWVersion; /* FW Version (integer) */ + int BIOSVersion; /* BIOS Version (integer) */ + char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */ + char busChangeEvent; + char hostId; + char rsvd[2]; +}; + +/* + * Device Information Page + * Report the number of, and ids of, all targets + * on this IOC. The ids array is a packed structure + * of the known targetInfo. + * bits 31-24: reserved + * 23-16: LUN + * 15- 8: Bus Number + * 7- 0: Target ID + */ +struct mpt_ioctl_targetinfo { + mpt_ioctl_header hdr; + int numDevices; /* Num targets on this ioc */ + int targetInfo[1]; +}; + + +/* + * Event reporting IOCTL's. 
These IOCTL's will + * use the following defines: + */ +struct mpt_ioctl_eventquery { + mpt_ioctl_header hdr; + unsigned short eventEntries; + unsigned short reserved; + unsigned int eventTypes; +}; + +struct mpt_ioctl_eventenable { + mpt_ioctl_header hdr; + unsigned int eventTypes; +}; + +#ifndef __KERNEL__ +typedef struct { + uint event; + uint eventContext; + uint data[2]; +} MPT_IOCTL_EVENTS; +#endif + +struct mpt_ioctl_eventreport { + mpt_ioctl_header hdr; + MPT_IOCTL_EVENTS eventData[1]; +}; + +#define MPT_MAX_NAME 32 +struct mpt_ioctl_test { + mpt_ioctl_header hdr; + u8 name[MPT_MAX_NAME]; + int chip_type; + u8 product [MPT_PRODUCT_LENGTH]; +}; + +/* Replace the FW image cached in host driver memory + * newImageSize - image size in bytes + * newImage - first byte of the new image + */ +typedef struct mpt_ioctl_replace_fw { + mpt_ioctl_header hdr; + int newImageSize; + u8 newImage[1]; +} mpt_ioctl_replace_fw_t; + +/* General MPT Pass through data strucutre + * + * iocnum + * timeout - in seconds, command timeout. If 0, set by driver to + * default value. + * replyFrameBufPtr - reply location + * dataInBufPtr - destination for read + * dataOutBufPtr - data source for write + * senseDataPtr - sense data location + * maxReplyBytes - maximum number of reply bytes to be sent to app. + * dataInSize - num bytes for data transfer in (read) + * dataOutSize - num bytes for data transfer out (write) + * dataSgeOffset - offset in words from the start of the request message + * to the first SGL + * MF[1]; + * + * Remark: Some config pages have bi-directional transfer, + * both a read and a write. The basic structure allows for + * a bidirectional set up. Normal messages will have one or + * both of these buffers NULL. 
+ */ +struct mpt_ioctl_command { + mpt_ioctl_header hdr; + int timeout; /* optional (seconds) */ + char *replyFrameBufPtr; + char *dataInBufPtr; + char *dataOutBufPtr; + char *senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; + +/* + * SPARC PLATFORM: See earlier remark. + */ +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +struct mpt_ioctl_command32 { + mpt_ioctl_header hdr; + int timeout; + u32 replyFrameBufPtr; + u32 dataInBufPtr; + u32 dataOutBufPtr; + u32 senseDataPtr; + int maxReplyBytes; + int dataInSize; + int dataOutSize; + int maxSenseBytes; + int dataSgeOffset; + char MF[1]; +}; +#endif /*}*/ + + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + /* + * COMPAQ Specific IOCTL Defines and Structures + */ + +#define CPQFCTS_IOC_MAGIC 'Z' + +#define CPQFCTS_GETPCIINFO _IOR(CPQFCTS_IOC_MAGIC, 1, cpqfc_pci_info_struct) +#define CPQFCTS_GETDRIVER _IOR(CPQFCTS_IOC_MAGIC, 2, int) +#define CPQFCTS_CTLR_STATUS _IOR(CPQFCTS_IOC_MAGIC, 3, struct _cpqfc_ctlr_status) +#define CPQFCTS_SCSI_IOCTL_FC_TARGET_ADDRESS _IOR(CPQFCTS_IOC_MAGIC, 4, struct scsi_fctargaddress) +#define CPQFCTS_SCSI_PASSTHRU _IOWR(CPQFCTS_IOC_MAGIC, 5, VENDOR_IOCTL_REQ) +#if defined(__sparc__) && defined(__sparc_v9__) +#define CPQFCTS_SCSI_PASSTHRU32 _IOWR(CPQFCTS_IOC_MAGIC, 5, VENDOR_IOCTL_REQ32) +#endif + +typedef struct { + unsigned short bus; + unsigned short bus_type; + unsigned short device_fn; + u32 board_id; + u32 slot_number; + unsigned short vendor_id; + unsigned short device_id; + unsigned short class_code; + unsigned short sub_vendor_id; + unsigned short sub_device_id; + u8 serial_number[81]; +} cpqfc_pci_info_struct; + + +typedef struct scsi_fctargaddress { + unsigned int host_port_id; + u8 host_wwn[8]; /* WW Network Name */ +} Scsi_FCTargAddress; + +typedef struct _cpqfc_ctlr_status { + u32 status; + u32 offline_reason; +} cpqfc_ctlr_status; 
+ + +/* Compaq SCSI I/O Passthru structures. + */ +#define MPT_COMPAQ_READ 0x26 +#define MPT_COMPAQ_WRITE 0x27 + +typedef struct { + int lc; /* controller number */ + int node; /* node number */ + int ld; /* target logical id */ + u32 nexus; + void *argp; +} VENDOR_IOCTL_REQ; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +typedef struct { + int lc; /* controller number */ + int node; /* node number */ + int ld; /* target logical id */ + u32 nexus; + u32 argp; +} VENDOR_IOCTL_REQ32; +#endif + +typedef struct { + char cdb[16]; /* cdb */ + unsigned short bus; /* bus number */ + unsigned short pdrive; /* physical drive */ + int len; /* data area size */ + int sense_len; /* sense size */ + char sense_data[40]; /* sense buffer */ + void *bufp; /* data buffer pointer */ + char rw_flag; +} cpqfc_passthru_t; + +#if defined(__KERNEL__) && defined(__sparc__) && defined(__sparc_v9__) /*{*/ +typedef struct { + char cdb[16]; /* cdb */ + unsigned short bus; /* bus number */ + unsigned short pdrive; /* physical drive */ + int len; /* data area size */ + int sense_len; /* sense size */ + char sense_data[40]; /* sense buffer */ + u32 bufp; /* data buffer pointer */ + char rw_flag; +} cpqfc_passthru32_t; +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#endif + diff -Nru a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c --- a/drivers/message/fusion/mptlan.c Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/mptlan.c Thu May 30 21:28:59 2002 @@ -23,10 +23,10 @@ * * (see also mptbase.c) * - * Copyright (c) 2000-2001 LSI Logic Corporation + * Copyright (c) 2000-2002 LSI Logic Corporation * Originally By: Noah Romer * - * $Id: mptlan.c,v 1.32.2.2 2001/07/12 19:43:33 nromer Exp $ + * $Id: mptlan.c,v 1.51 2002/02/11 14:40:55 sralston Exp $ */ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -79,6 +79,8 @@ #define MYNAM "mptlan" +MODULE_LICENSE("GPL"); + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * MPT LAN message sizes without variable part. @@ -109,8 +111,8 @@ MPT_ADAPTER *mpt_dev; u8 pnum; /* Port number in the IOC. This is not a Unix network port! */ - atomic_t buckets_out; /* number of unused buckets on IOC */ - int bucketthresh; /* Send more when this many used */ + atomic_t buckets_out; /* number of unused buckets on IOC */ + int bucketthresh; /* Send more when this many left */ int *mpt_txfidx; /* Free Tx Context list */ int mpt_txfidx_tail; @@ -123,8 +125,8 @@ struct BufferControl *RcvCtl; /* Receive BufferControl structs */ struct BufferControl *SendCtl; /* Send BufferControl structs */ - int max_buckets_out; /* Max buckets to send to IOC */ - int tx_max_out; /* IOC's Tx queue len */ + int max_buckets_out; /* Max buckets to send to IOC */ + int tx_max_out; /* IOC's Tx queue len */ u32 total_posted; u32 total_received; @@ -152,7 +154,8 @@ static int mpt_lan_reset(struct net_device *dev); static int mpt_lan_close(struct net_device *dev); static void mpt_lan_post_receive_buckets(void *dev_id); -static void mpt_lan_wake_post_buckets_task(struct net_device *dev); +static void mpt_lan_wake_post_buckets_task(struct net_device *dev, + int priority); static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); static int mpt_lan_receive_post_reply(struct net_device *dev, LANReceivePostReply_t *pRecvRep); @@ -175,8 +178,10 @@ static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1]; +#ifdef QLOGIC_NAA_WORKAROUND static struct NAA_Hosed *mpt_bad_naa = NULL; rwlock_t bad_naa_lock; +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -229,7 +234,7 @@ case LAN_REPLY_FORM_SEND_SINGLE: // dioprintk((MYNAM "/lan_reply: " // "calling mpt_lan_send_reply (turbo)\n")); - + // Potential 
BUG here? -sralston // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg); // If/when mpt_lan_send_turbo would return 1 here, @@ -333,7 +338,7 @@ struct net_device *dev = mpt_landev[ioc->id]; struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv; - dprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", + dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); if (priv->mpt_rxfidx == NULL) @@ -342,9 +347,11 @@ if (reset_phase == MPT_IOC_PRE_RESET) { int i; unsigned long flags; - + netif_stop_queue(dev); + dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name)); + atomic_set(&priv->buckets_out, 0); /* Reset Rx Free Tail index and re-populate the queue. */ @@ -365,7 +372,7 @@ static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { - dprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n")); + dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n")); switch (le32_to_cpu(pEvReply->Event)) { case MPI_EVENT_NONE: /* 00 */ @@ -403,9 +410,9 @@ if (mpt_lan_reset(dev) != 0) { MPT_ADAPTER *mpt_dev = priv->mpt_dev; - + printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed."); - + if (mpt_dev->active) printk ("The ioc is active. 
Perhaps it needs to be" " reset?\n"); @@ -429,7 +436,7 @@ priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i; } - dprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n")); + dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n")); priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int), GFP_KERNEL); @@ -447,12 +454,12 @@ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; } -/**/ dprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); +/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); /**/ for (i = 0; i < priv->tx_max_out; i++) -/**/ dprintk((" %xh", priv->mpt_txfidx[i])); -/**/ dprintk(("\n")); +/**/ dlprintk((" %xh", priv->mpt_txfidx[i])); +/**/ dlprintk(("\n")); - dprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); + dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); mpt_lan_post_receive_buckets(dev); printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", @@ -466,7 +473,7 @@ } netif_start_queue(dev); - dprintk((KERN_INFO MYNAM "/lo: Done.\n")); + dlprintk((KERN_INFO MYNAM "/lo: Done.\n")); return 0; out_mpt_rxfidx: @@ -494,7 +501,7 @@ mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id); if (mf == NULL) { -/* dprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! " +/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! 
" "Unable to allocate a request frame.\n")); */ return -1; @@ -523,11 +530,11 @@ unsigned int timeout; int i; - dprintk((KERN_INFO MYNAM ": mpt_lan_close called\n")); + dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n")); mpt_event_deregister(LanCtx); - dprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " + dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " "since driver was loaded, %d still out\n", priv->total_posted,atomic_read(&priv->buckets_out))); @@ -537,18 +544,18 @@ timeout = 2 * HZ; while (atomic_read(&priv->buckets_out) && --timeout) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); } for (i = 0; i < priv->max_buckets_out; i++) { if (priv->RcvCtl[i].skb != NULL) { -/**/ dprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " +/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " /**/ "is still out\n", i)); pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma, - priv->RcvCtl[i].len, + priv->RcvCtl[i].len, PCI_DMA_FROMDEVICE); - dev_kfree_skb(priv->RcvCtl[i].skb); + dev_kfree_skb(priv->RcvCtl[i].skb); } } @@ -556,11 +563,11 @@ kfree (priv->mpt_rxfidx); for (i = 0; i < priv->tx_max_out; i++) { - if (priv->SendCtl[i].skb != NULL) { + if (priv->SendCtl[i].skb != NULL) { pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma, priv->SendCtl[i].len, PCI_DMA_TODEVICE); - dev_kfree_skb(priv->SendCtl[i].skb); + dev_kfree_skb(priv->SendCtl[i].skb); } } @@ -599,7 +606,13 @@ static void mpt_lan_tx_timeout(struct net_device *dev) { - netif_wake_queue(dev); + struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv; + MPT_ADAPTER *mpt_dev = priv->mpt_dev; + + if (mpt_dev->active) { + dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name)); + netif_wake_queue(dev); + } } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -722,7 +735,6 @@ dma_addr_t dma; unsigned long flags; int ctx; - struct NAA_Hosed *nh; u16 cur_naa = 0x1000; 
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n", @@ -741,7 +753,6 @@ mf = mpt_get_msg_frame(LanCtx, mpt_dev->id); if (mf == NULL) { netif_stop_queue(dev); - dev_kfree_skb(skb); spin_unlock_irqrestore(&priv->txfidx_lock, flags); printk (KERN_ERR "%s: Unable to alloc request frame\n", @@ -791,6 +802,10 @@ // IOC_AND_NETDEV_NAMES_s_s(dev), // ctx, skb, skb->data)); +#ifdef QLOGIC_NAA_WORKAROUND +{ + struct NAA_Hosed *nh; + /* Munge the NAA for Tx packets to QLogic boards, which don't follow RFC 2625. The longer I look at this, the more my opinion of Qlogic drops. */ @@ -803,12 +818,14 @@ (nh->ieee[4] == skb->mac.raw[4]) && (nh->ieee[5] == skb->mac.raw[5])) { cur_naa = nh->NAA; - dprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " + dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value " "= %04x.\n", cur_naa)); break; } } read_unlock_irq(&bad_naa_lock); +} +#endif pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | (skb->mac.raw[0] << 8) | @@ -821,10 +838,10 @@ pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; /* If we ever decide to send more than one Simple SGE per LANSend, then - we will need to make sure that LAST_ELEMENT only gets set on the + we will need to make sure that LAST_ELEMENT only gets set on the last one. Otherwise, bad voodoo and evil funkiness will commence. */ pSimple->FlagsLength = cpu_to_le32( - ((MPI_SGE_FLAGS_LAST_ELEMENT | + ((MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_SYSTEM_ADDRESS | @@ -842,23 +859,32 @@ dev->trans_start = jiffies; dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. 
FlagsLength = %08x.\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - le32_to_cpu(pSimple->FlagsLength))); + IOC_AND_NETDEV_NAMES_s_s(dev), + le32_to_cpu(pSimple->FlagsLength))); return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static inline void -mpt_lan_wake_post_buckets_task(struct net_device *dev) +mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) +/* + * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue + */ { struct mpt_lan_priv *priv = dev->priv; - + if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { - queue_task(&priv->post_buckets_task, &tq_immediate); - mark_bh(IMMEDIATE_BH); - dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", - IOC_AND_NETDEV_NAMES_s_s(dev) )); + if (priority) { + queue_task(&priv->post_buckets_task, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } else { + queue_task(&priv->post_buckets_task, &tq_timer); + dioprintk((KERN_INFO MYNAM ": post_buckets queued on " + "timer.\n")); + } + dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n", + IOC_AND_NETDEV_NAMES_s_s(dev) )); } } @@ -870,7 +896,7 @@ skb->protocol = mpt_lan_type_trans(skb, dev); - dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) " + dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) " "delivered to upper level.\n", IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); @@ -884,7 +910,7 @@ atomic_read(&priv->buckets_out))); if (atomic_read(&priv->buckets_out) < priv->bucketthresh) - mpt_lan_wake_post_buckets_task(dev); + mpt_lan_wake_post_buckets_task(dev, 1); dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets " "remaining, %d received back since sod\n", @@ -956,12 +982,12 @@ unsigned long flags; struct sk_buff *skb; u32 ctx; - u8 count; + int count; int i; count = pRecvRep->NumberOfContexts; -/**/ dprintk((KERN_INFO MYNAM "/receive_post_reply: " +/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: " "IOC returned %d buckets, freeing 
them...\n", count)); spin_lock_irqsave(&priv->rxfidx_lock, flags); @@ -970,11 +996,11 @@ skb = priv->RcvCtl[ctx].skb; -// dprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n", +// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n", // IOC_AND_NETDEV_NAMES_s_s(dev))); -// dprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p", -// priv, &(priv->buckets_out))); -// dprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); +// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p", +// priv, &(priv->buckets_out))); +// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n")); priv->RcvCtl[ctx].skb = NULL; pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma, @@ -989,13 +1015,13 @@ // for (i = 0; i < priv->max_buckets_out; i++) // if (priv->RcvCtl[i].skb != NULL) -// dprintk((KERN_INFO MYNAM "@rpr: bucket %03x " +// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x " // "is still out\n", i)); -/* dprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", +/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", count)); */ -/**/ dprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " +/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " /**/ "remaining, %d received back since sod.\n", /**/ atomic_read(&priv->buckets_out), priv->total_received)); return 0; @@ -1010,9 +1036,9 @@ MPT_ADAPTER *mpt_dev = priv->mpt_dev; struct sk_buff *skb, *old_skb; unsigned long flags; - u32 len, ctx; - u32 offset; - u8 count; + u32 len, ctx, offset; + u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); + int count; int i, l; dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n")); @@ -1059,7 +1085,7 @@ if (!skb) { printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", IOC_AND_NETDEV_NAMES_s_s(dev), - __FILE__, __LINE__); + __FILE__, __LINE__); return -ENOMEM; } @@ -1096,7 +1122,7 @@ if (!skb) { printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! 
(%s@%d)\n", IOC_AND_NETDEV_NAMES_s_s(dev), - __FILE__, __LINE__); + __FILE__, __LINE__); return -ENOMEM; } @@ -1140,25 +1166,32 @@ "Arrgghh! We've done it again!\n"); } -#if 0 - { - u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); - if (remaining < priv->bucketthresh) - mpt_lan_wake_post_buckets_task(dev); - - if (remaining == 0) - printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " - "(priv->buckets_out = %d)\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - atomic_read(&priv->buckets_out)); - else - printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. " - "(priv->buckets_out = %d)\n", - IOC_AND_NETDEV_NAMES_s_s(dev), - remaining, atomic_read(&priv->buckets_out)); + if (remaining == 0) + printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + atomic_read(&priv->buckets_out)); + else if (remaining < 10) + printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. " + "(priv->buckets_out = %d)\n", + IOC_AND_NETDEV_NAMES_s_s(dev), + remaining, atomic_read(&priv->buckets_out)); + + if ((remaining < priv->bucketthresh) && + ((atomic_read(&priv->buckets_out) - remaining) > + MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) { + + printk (KERN_WARNING MYNAM " Mismatch between driver's " + "buckets_out count and fw's BucketsRemaining " + "count has crossed the threshold, issuing a " + "LanReset to clear the fw's hashtable. 
You may " + "want to check your /var/log/messages for \"CRC " + "error\" event notifications.\n"); + + mpt_lan_reset(dev); + mpt_lan_wake_post_buckets_task(dev, 0); } -#endif - + return mpt_lan_receive_skb(dev, skb); } @@ -1242,15 +1275,15 @@ if (skb == NULL) { skb = dev_alloc_skb(len); if (skb == NULL) { -/**/ printk (KERN_WARNING -/**/ MYNAM "/%s: Can't alloc skb\n", -/**/ __FUNCTION__); + printk (KERN_WARNING + MYNAM "/%s: Can't alloc skb\n", + __FUNCTION__); priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; spin_unlock_irqrestore(&priv->rxfidx_lock, flags); break; } - dma = pci_map_single(mpt_dev->pcidev, skb->data, + dma = pci_map_single(mpt_dev->pcidev, skb->data, len, PCI_DMA_FROMDEVICE); priv->RcvCtl[ctx].skb = skb; @@ -1308,7 +1341,7 @@ dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", __FUNCTION__, buckets, atomic_read(&priv->buckets_out))); dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n", - __FUNCTION__, priv->total_posted, priv->total_received)); + __FUNCTION__, priv->total_posted, priv->total_received)); clear_bit(0, &priv->post_buckets_active); } @@ -1336,7 +1369,7 @@ priv->post_buckets_task.data = dev; priv->post_buckets_active = 0; - dprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", + dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", __LINE__, dev->mtu + dev->hard_header_len + 4)); atomic_set(&priv->buckets_out, 0); @@ -1346,7 +1379,7 @@ if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out) priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets; - dprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n", + dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n", __LINE__, mpt_dev->pfacts[0].MaxLanBuckets, max_buckets_out, @@ -1389,7 +1422,7 @@ dev->tx_timeout = mpt_lan_tx_timeout; dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; - dprintk((KERN_INFO MYNAM ": Finished registering dev " + dlprintk((KERN_INFO MYNAM ": Finished registering dev " "and 
setting initial values\n")); SET_MODULE_OWNER(dev); @@ -1407,9 +1440,11 @@ show_mptmod_ver(LANAME, LANVER); - /* Init the global r/w lock for the bad_naa list. We want to do this +#ifdef QLOGIC_NAA_WORKAROUND + /* Init the global r/w lock for the bad_naa list. We want to do this before any boards are initialized and may be used. */ rwlock_init(&bad_naa_lock); +#endif if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) { printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n"); @@ -1419,10 +1454,10 @@ /* Set the callback index to be used by driver core for turbo replies */ mpt_lan_index = LanCtx; - dprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx)); + dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx)); if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) { - dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); + dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n")); } else { printk(KERN_ERR MYNAM ": Eieee! unable to register a reset " "handler with mptbase! The world is at an end! " @@ -1458,7 +1493,7 @@ // IOC_AND_NETDEV_NAMES_s_s(dev), // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out); mpt_landev[j] = dev; - dprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n", + dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n", dev, j, mpt_landev[j])); j++; @@ -1508,18 +1543,15 @@ MODULE_PARM(tx_max_out_p, "i"); MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME! 
-MODULE_LICENSE("GPL"); - module_init(mpt_lan_init); module_exit(mpt_lan_exit); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static unsigned short -mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) +mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev) { struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; struct fcllc *fcllc; - u16 source_naa = fch->stype, found = 0; skb->mac.raw = skb->data; skb_pull(skb, sizeof(struct mpt_lan_ohdr)); @@ -1535,7 +1567,7 @@ printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n", NETDEV_PTR_TO_IOC_NAME_s(dev)); printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", - fch->saddr[0], fch->saddr[1], fch->saddr[2], + fch->saddr[0], fch->saddr[1], fch->saddr[2], fch->saddr[3], fch->saddr[4], fch->saddr[5]); } @@ -1555,6 +1587,10 @@ fcllc = (struct fcllc *)skb->data; +#ifdef QLOGIC_NAA_WORKAROUND +{ + u16 source_naa = fch->stype, found = 0; + /* Workaround for QLogic not following RFC 2625 in regards to the NAA value. 
*/ @@ -1562,15 +1598,15 @@ source_naa = swab16(source_naa); if (fcllc->ethertype == htons(ETH_P_ARP)) - dprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of " + dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of " "%04x.\n", source_naa)); - if ((fcllc->ethertype == htons(ETH_P_ARP)) && + if ((fcllc->ethertype == htons(ETH_P_ARP)) && ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){ struct NAA_Hosed *nh, *prevnh; int i; - dprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from " + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from " "system with non-RFC 2625 NAA value (%04x).\n", source_naa)); @@ -1584,17 +1620,17 @@ (nh->ieee[4] == fch->saddr[4]) && (nh->ieee[5] == fch->saddr[5])) { found = 1; - dprintk ((KERN_INFO "mptlan/type_trans: ARP Re" + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re" "q/Rep w/ bad NAA from system already" " in DB.\n")); break; } } - + if ((!found) && (nh == NULL)) { nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL); - dprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/" + dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/" " bad NAA from system not yet in DB.\n")); if (nh != NULL) { @@ -1603,11 +1639,11 @@ mpt_bad_naa = nh; if (prevnh) prevnh->next = nh; - + nh->NAA = source_naa; /* Set the S_NAA value. */ for (i = 0; i < FC_ALEN; i++) nh->ieee[i] = fch->saddr[i]; - dprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:" + dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:" "%02x:%02x with non-compliant S_NAA value.\n", fch->saddr[0], fch->saddr[1], fch->saddr[2], fch->saddr[3], fch->saddr[4],fch->saddr[5])); @@ -1622,9 +1658,10 @@ } write_unlock_irq(&bad_naa_lock); } - +} +#endif - /* Strip the SNAP header from ARP packets since we don't + /* Strip the SNAP header from ARP packets since we don't * pass them through to the 802.2/SNAP layers. 
*/ if (fcllc->dsap == EXTENDED_SAP && diff -Nru a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h --- a/drivers/message/fusion/mptlan.h Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/mptlan.h Thu May 30 21:28:59 2002 @@ -21,6 +21,7 @@ #include #include #include +#include // #include #include @@ -43,13 +44,15 @@ #define MPT_LAN_MAX_BUCKETS_OUT 256 #define MPT_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */ +#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10 #define MPT_LAN_RX_COPYBREAK 200 -#define MPT_LAN_TX_TIMEOUT (1*HZ) +#define MPT_LAN_TX_TIMEOUT (1*HZ) #define MPT_TX_MAX_OUT_LIM 127 #define MPT_LAN_MIN_MTU 96 /* RFC2625 */ #define MPT_LAN_MAX_MTU 65280 /* RFC2625 */ -#define MPT_LAN_MTU 16128 /* be nice to slab allocator */ +#define MPT_LAN_MTU 13312 /* Max perf range + lower mem + usage than 16128 */ #define MPT_LAN_NAA_RFC2625 0x1 #define MPT_LAN_NAA_QLOGIC 0x2 @@ -64,6 +67,12 @@ #define dioprintk(x) printk x #else #define dioprintk(x) +#endif + +#ifdef MPT_LAN_DEBUG +#define dlprintk(x) printk x +#else +#define dlprintk(x) #endif #define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)(d)->priv) diff -Nru a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c --- a/drivers/message/fusion/mptscsih.c Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/mptscsih.c Thu May 30 21:28:59 2002 @@ -9,17 +9,24 @@ * This driver would not exist if not for Alan Cox's development * of the linux i2o driver. * + * A special thanks to Pamela Delaney (LSI Logic) for tons of work + * and countless enhancements while adding support for the 1030 + * chip family. Pam has been instrumental in the development of + * of the 2.xx.xx series fusion drivers, and her contributions are + * far too numerous to hope to list in one place. + * * A huge debt of gratitude is owed to David S. Miller (DaveM) * for fixing much of the stupid and broken stuff in the early * driver while porting to sparc64 platform. THANK YOU! 
* * (see mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Original author: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptscsih.c,v 1.29.4.1 2001/09/18 03:22:30 sralston Exp $ + * $Id: mptscsih.c,v 1.80 2002/02/27 18:44:27 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -65,7 +72,10 @@ #include #include #include -#include +#include /* for io_request_lock (spinlock) decl */ +#include /* for mdelay */ +#include /* needed for in_interrupt() proto */ +#include /* notifier code */ #include "../../scsi/scsi.h" #include "../../scsi/hosts.h" #include "../../scsi/sd.h" @@ -83,52 +93,131 @@ MODULE_DESCRIPTION(my_NAME); MODULE_LICENSE("GPL"); +/* Set string for command line args from insmod */ +#ifdef MODULE +char *mptscsih = 0; +MODULE_PARM(mptscsih, "s"); +#endif /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ typedef struct _BIG_SENSE_BUF { - u8 data[256]; + u8 data[MPT_SENSE_BUFFER_ALLOC]; } BIG_SENSE_BUF; -typedef struct _MPT_SCSI_HOST { - MPT_ADAPTER *ioc; - int port; - struct scsi_cmnd **ScsiLookup; - u8 *SgHunks; - dma_addr_t SgHunksDMA; - u32 qtag_tick; -} MPT_SCSI_HOST; - -typedef struct _MPT_SCSI_DEV { - struct _MPT_SCSI_DEV *forw; - struct _MPT_SCSI_DEV *back; - MPT_ADAPTER *ioc; - int sense_sz; - BIG_SENSE_BUF CachedSense; - unsigned long io_cnt; - unsigned long read_cnt; -} MPT_SCSI_DEV; +#define MPT_SCANDV_GOOD (0x00000000) /* must be 0 */ +#define MPT_SCANDV_DID_RESET (0x00000001) +#define MPT_SCANDV_SENSE (0x00000002) +#define MPT_SCANDV_SOME_ERROR (0x00000004) +#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008) + +#define MPT_SCANDV_MAX_RETRIES (10) + +#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */ +#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */ +#define MPT_ICFLAG_PHYS_DISK 
0x04 /* Any SCSI IO but do Phys Disk Format */ +#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */ +#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occured with this command */ +#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */ + +typedef struct _internal_cmd { + char *data; /* data pointer */ + dma_addr_t data_dma; /* data dma address */ + int size; /* transfer size */ + u8 cmd; /* SCSI Op Code */ + u8 bus; /* bus number */ + u8 id; /* SCSI ID (virtual) */ + u8 lun; + u8 flags; /* Bit Field - See above */ + u8 physDiskNum; /* Phys disk number, -1 else */ + u8 rsvd2; + u8 rsvd; +} INTERNAL_CMD; + +typedef struct _negoparms { + u8 width; + u8 offset; + u8 factor; + u8 flags; +} NEGOPARMS; + +typedef struct _dv_parameters { + NEGOPARMS max; + NEGOPARMS now; + u8 cmd; + u8 id; + u16 pad1; +} DVPARAMETERS; + /* * Other private/forward protos... */ - static int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static void mptscsih_report_queue_full(Scsi_Cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq); static int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static int mptscsih_io_direction(Scsi_Cmnd *cmd); + +static int mptscsih_Add32BitSGE(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt, + SCSIIORequest_t *pReq, int req_idx); +static void mptscsih_AddNullSGE(SCSIIORequest_t *pReq); +static int mptscsih_getFreeChainBuffer(MPT_SCSI_HOST *hd, int *retIndex); +static void mptscsih_freeChainBuffers(MPT_SCSI_HOST *hd, int req_idx); +static int mptscsih_initChainBuffers (MPT_SCSI_HOST *hd, int init); + static void copy_sense_data(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); -static u32 SCPNT_TO_MSGCTX(Scsi_Cmnd *sc); +#ifndef MPT_SCSI_USE_NEW_EH +static void search_taskQ_for_cmd(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd); +#endif +static u32 SCPNT_TO_LOOKUP_IDX(Scsi_Cmnd *sc); +static MPT_FRAME_HDR *mptscsih_search_pendingQ(MPT_SCSI_HOST *hd, int scpnt_idx); +static void 
post_pendingQ_commands(MPT_SCSI_HOST *hd); + +static int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag); +static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag); static int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); static int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); +static VirtDevice *mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen); +void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target); +static void clear_sense_flag(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq); +static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq, char *data); +static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags); +static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags); +static int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); +static void mptscsih_timer_expired(unsigned long data); +static void mptscsih_taskmgmt_timeout(unsigned long data); +static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); +static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum); + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io); +static void mptscsih_domainValidation(void *hd); +static void mptscsih_doDv(MPT_SCSI_HOST *hd, int portnum, int target); +static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage); +static void mptscsih_fillbuf(char *buffer, int size, int index, int width); +#endif +static int mptscsih_setup(char *str); +static int mptscsih_halt(struct notifier_block *nb, ulong event, void *buf); + +/* + * Reboot Notification + */ +static struct notifier_block mptscsih_notifier = { + mptscsih_halt, NULL, 0 +}; + +/* + * 
Private data... + */ static int mpt_scsi_hosts = 0; static atomic_t queue_depth; static int ScsiDoneCtx = -1; static int ScsiTaskCtx = -1; +static int ScsiScanDvCtx = -1; /* Used only for bus scan and dv */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,28) static struct proc_dir_entry proc_mpt_scsihost = @@ -141,23 +230,40 @@ }; #endif -#define SNS_LEN(scp) sizeof((scp)->sense_buffer) +#define SNS_LEN(scp) sizeof((scp)->sense_buffer) #ifndef MPT_SCSI_USE_NEW_EH /* * Stuff to handle single-threading SCSI TaskMgmt * (abort/reset) requests... */ -static spinlock_t mpt_scsih_taskQ_lock = SPIN_LOCK_UNLOCKED; -static MPT_Q_TRACKER mpt_scsih_taskQ = { - (MPT_FRAME_HDR*) &mpt_scsih_taskQ, - (MPT_FRAME_HDR*) &mpt_scsih_taskQ -}; -static int mpt_scsih_taskQ_cnt = 0; -static int mpt_scsih_taskQ_bh_active = 0; -static MPT_FRAME_HDR *mpt_scsih_active_taskmgmt_mf = NULL; +static spinlock_t mytaskQ_lock = SPIN_LOCK_UNLOCKED; +static int mytaskQ_bh_active = 0; +static struct tq_struct mptscsih_ptaskfoo; +static atomic_t mpt_taskQdepth; +#endif + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/* + * Domain Validation task structure + */ +static spinlock_t dvtaskQ_lock = SPIN_LOCK_UNLOCKED; +static int dvtaskQ_active = 0; +static int dvtaskQ_release = 0; +static struct tq_struct mptscsih_dvTask; #endif +/* + * Wait Queue setup + */ +static DECLARE_WAIT_QUEUE_HEAD (scandv_waitq); +static int scandv_wait_done = 1; + +/* Driver default setup + */ +static struct mptscsih_driver_setup + driver_setup = MPTSCSIH_DRIVER_SETUP; + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * mptscsih_io_done - Main SCSI IO callback routine registered to @@ -174,123 +280,109 @@ * Returns 1 indicating alloc'd request frame ptr should be freed. 
*/ static int -mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r) +mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { Scsi_Cmnd *sc; MPT_SCSI_HOST *hd; - MPT_SCSI_DEV *mpt_sdev = NULL; + SCSIIORequest_t *pScsiReq; + SCSIIOReply_t *pScsiReply; + unsigned long flags; u16 req_idx; + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if ((mf == NULL) || (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": ERROR! NULL or BAD req frame ptr (=%p)!\n", mf); - return 1; + printk(MYIOC_s_ERR_FMT "%s req frame ptr! (=%p)!\n", + ioc->name, mf?"BAD":"NULL", mf); + /* return 1; CHECKME SteveR. Don't free. */ + return 0; } - hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sc = hd->ScsiLookup[req_idx]; - hd->ScsiLookup[req_idx] = NULL; + if (sc == NULL) { + MPIHeader_t *hdr = (MPIHeader_t *)mf; - dmfprintk((KERN_INFO MYNAM ": ScsiDone (req:sc:reply=%p:%p:%p)\n", mf, sc, r)); + atomic_dec(&queue_depth); - atomic_dec(&queue_depth); + /* writeSDP1 will use the ScsiDoneCtx + * There is no processing for the reply. + * Just return to the calling function. + */ + if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST) + printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n", ioc->name); - /* - * Check for {1st} {IO} completion to "new" device. - * How do we know it's a new device? - * If we haven't set SDpnt->hostdata I guess... 
- */ - if (sc && sc->device) { - mpt_sdev = (MPT_SCSI_DEV*)sc->device->hostdata; - if (!mpt_sdev) { - dprintk((KERN_INFO MYNAM ": *NEW* SCSI device (%d:%d:%d)!\n", - sc->device->id, sc->device->lun, sc->device->channel)); - if ((sc->device->hostdata = kmalloc(sizeof(MPT_SCSI_DEV), GFP_ATOMIC)) == NULL) { - printk(KERN_ERR MYNAM ": ERROR - kmalloc(%d) FAILED!\n", (int)sizeof(MPT_SCSI_DEV)); - } else { - memset(sc->device->hostdata, 0, sizeof(MPT_SCSI_DEV)); - mpt_sdev = (MPT_SCSI_DEV *) sc->device->hostdata; - mpt_sdev->ioc = ioc; - } - } else { - if (++mpt_sdev->io_cnt && mptscsih_io_direction(sc) < 0) { - if (++mpt_sdev->read_cnt == 3) { - dprintk((KERN_INFO MYNAM ": 3rd DATA_IN, CDB[0]=%02x\n", - sc->cmnd[0])); - } - } -#if 0 - if (mpt_sdev->sense_sz) { - /* - * Completion of first IO down this path - * *should* invalidate device SenseData... - */ - mpt_sdev->sense_sz = 0; - } -#endif - } + mptscsih_freeChainBuffers(hd, req_idx); + return 1; } -#if 0 -{ - MPT_FRAME_HDR *mf_chk; + dmfprintk((MYIOC_s_INFO_FMT "ScsiDone (mf=%p,mr=%p,sc=%p)\n", + ioc->name, mf, mr, sc)); - /* This, I imagine, is a costly check, but... - * If abort/reset active, check to see if this is a IO - * that completed while ABORT/RESET for it is waiting - * on our taskQ! - */ - if (! Q_IS_EMPTY(&mpt_scsih_taskQ)) { - /* If ABORT for this IO is queued, zap it! 
*/ - mf_chk = search_taskQ(1,sc,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); - if (mf_chk != NULL) { - sc->result = DID_ABORT << 16; - spin_lock_irqsave(sc->host->host_lock, flags); - sc->scsi_done(sc); - spin_unlock_irqrestore(sc->host->host_lock, flags); - return 1; - } - } -} -#endif + atomic_dec(&queue_depth); - if (r != NULL && sc != NULL) { - SCSIIOReply_t *pScsiReply; - SCSIIORequest_t *pScsiReq; - u16 status; + sc->result = DID_OK << 16; /* Set default reply as OK */ + pScsiReq = (SCSIIORequest_t *) mf; + pScsiReply = (SCSIIOReply_t *) mr; + + if (pScsiReply == NULL) { + /* special context reply handling */ - pScsiReply = (SCSIIOReply_t *) r; - pScsiReq = (SCSIIORequest_t *) mf; + /* If regular Inquiry cmd - save inquiry data + */ + if (pScsiReq->CDB[0] == INQUIRY && !(pScsiReq->CDB[1] & 0x3)) { + int dlen; + + dlen = le32_to_cpu(pScsiReq->DataLength); + if (dlen >= SCSI_STD_INQUIRY_BYTES) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + dlen); + } + } + clear_sense_flag(hd, pScsiReq); + + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); + } else { + u32 xfer_cnt; + u16 status; + u8 scsi_state; status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK; + scsi_state = pScsiReply->SCSIState; - dprintk((KERN_NOTICE MYNAM ": Uh-Oh! (req:sc:reply=%p:%p:%p)\n", mf, sc, r)); + dprintk((KERN_NOTICE " Uh-Oh! (%d:%d:%d) mf=%p, mr=%p, sc=%p\n", + ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1], + mf, mr, sc)); dprintk((KERN_NOTICE " IOCStatus=%04xh, SCSIState=%02xh" - ", SCSIStatus=%02xh, IOCLogInfo=%08xh\n", - status, pScsiReply->SCSIState, pScsiReply->SCSIStatus, - le32_to_cpu(pScsiReply->IOCLogInfo))); + ", SCSIStatus=%02xh, IOCLogInfo=%08xh\n", + status, scsi_state, pScsiReply->SCSIStatus, + le32_to_cpu(pScsiReply->IOCLogInfo))); + + if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) + copy_sense_data(sc, hd, mf, pScsiReply); /* * Look for + dump FCP ResponseInfo[]! 
*/ - if (pScsiReply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { + if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { dprintk((KERN_NOTICE " FCP_ResponseInfo=%08xh\n", le32_to_cpu(pScsiReply->ResponseInfo))); } switch(status) { case MPI_IOCSTATUS_BUSY: /* 0x0002 */ - /*sc->result = DID_BUS_BUSY << 16;*/ /* YIKES! - Seems to - * kill linux interrupt - * handler - */ - sc->result = STS_BUSY; /* Try SCSI BUSY! */ - break; - - case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ - /* Not real sure here... */ - sc->result = DID_OK << 16; + /* CHECKME! + * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry) + * But not: DID_BUS_BUSY lest one risk + * killing interrupt handler:-( + */ + sc->result = STS_BUSY; break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */ @@ -299,10 +391,29 @@ break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ - /* Spoof to SCSI Selection Timeout! */ + /* Spoof to SCSI Selection Timeout! */ sc->result = DID_NO_CONNECT << 16; break; + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ +#ifndef MPT_SCSI_USE_NEW_EH + search_taskQ_for_cmd(sc, hd); +#endif + /* Linux handles an unsolicited DID_RESET better + * than an unsolicited DID_ABORT. + */ + sc->result = DID_RESET << 16; + break; + + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ +#ifndef MPT_SCSI_USE_NEW_EH + search_taskQ_for_cmd(sc, hd); +#endif + sc->result = DID_RESET << 16; + break; + + case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ /* * YIKES! I just discovered that SCSI IO which @@ -312,78 +423,148 @@ * Do upfront check for valid SenseData and give it * precedence! 
*/ + sc->result = (DID_OK << 16) | pScsiReply->SCSIStatus; + clear_sense_flag(hd, pScsiReq); if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - copy_sense_data(sc, hd, mf, pScsiReply); - sc->result = pScsiReply->SCSIStatus; - break; + /* Have already saved the status and sense data + */ + ; + } else if (pScsiReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { + /* What to do? + */ + sc->result = DID_SOFT_ERROR << 16; + } + else if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + /* Not real sure here either... */ + sc->result = DID_RESET << 16; } - dprintk((KERN_NOTICE MYNAM ": sc->underflow={report ERR if < %02xh bytes xfer'd}\n", sc->underflow)); - dprintk((KERN_NOTICE MYNAM ": ActBytesXferd=%02xh\n", le32_to_cpu(pScsiReply->TransferCount))); + /* Give report and update residual count. + */ + xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); + dprintk((KERN_NOTICE " sc->underflow={report ERR if < %02xh bytes xfer'd}\n", + sc->underflow)); + dprintk((KERN_NOTICE " ActBytesXferd=%02xh\n", xfer_cnt)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) - sc->resid = sc->request_bufflen - le32_to_cpu(pScsiReply->TransferCount); - dprintk((KERN_NOTICE MYNAM ": SET sc->resid=%02xh\n", sc->resid)); + sc->resid = sc->request_bufflen - xfer_cnt; + dprintk((KERN_NOTICE " SET sc->resid=%02xh\n", sc->resid)); #endif - if (pScsiReq->CDB[0] == INQUIRY) { - sc->result = (DID_OK << 16); - break; - } + /* Report Queue Full + */ + if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL) + mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); - /* workaround attempts... */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) - if (sc->resid >= 0x200) { - /* GRRRRR... 
- * //sc->result = DID_SOFT_ERROR << 16; - * Try spoofing to BUSY - */ - sc->result = STS_BUSY; - } else { - sc->result = 0; + /* If regular Inquiry cmd and some data was transferred, + * save inquiry data + */ + if ( pScsiReq->CDB[0] == INQUIRY + && !(pScsiReq->CDB[1] & 0x3) + && xfer_cnt >= SCSI_STD_INQUIRY_BYTES + ) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + xfer_cnt); } -#else - sc->result = 0; -#endif - break; - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ - sc->result = DID_ABORT << 16; - break; - - case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ - sc->result = DID_RESET << 16; + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); break; + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ - sc->result = pScsiReply->SCSIStatus; + sc->result = (DID_OK << 16) | pScsiReply->SCSIStatus; + clear_sense_flag(hd, pScsiReq); if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { - copy_sense_data(sc, hd, mf, pScsiReply); - - /* If running agains circa 200003dd 909 MPT f/w, - * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL - * (QUEUE_FULL) returned from device! --> get 0x0000?128 - * and with SenseBytes set to 0. + /* + * If running agains circa 200003dd 909 MPT f/w, + * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL + * (QUEUE_FULL) returned from device! --> get 0x0000?128 + * and with SenseBytes set to 0. */ if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL) mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); + +#ifndef MPT_SCSI_USE_NEW_EH + /* ADDED 20011120 -sralston + * Scsi mid-layer (old_eh) doesn't seem to like it + * when RAID returns SCSIStatus=02 (CHECK CONDITION), + * SenseKey=01 (RECOVERED ERROR), ASC/ASCQ=95/01. + * Seems to be * treating this as a IO error:-( + * + * So just lie about it altogether here. 
+ * + * NOTE: It still gets reported to syslog via + * mpt_ScsiHost_ErrorReport from copy_sense_data + * call far above. + */ + if ( pScsiReply->SCSIStatus == STS_CHECK_CONDITION + && SD_Sense_Key(sc->sense_buffer) == SK_RECOVERED_ERROR + ) { + sc->result = 0; + } +#endif + } - else if (pScsiReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { + else if (pScsiReply->SCSIState & + (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS) + ) { /* - * What to do? + * What to do? */ sc->result = DID_SOFT_ERROR << 16; } else if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { /* Not real sure here either... */ - sc->result = DID_ABORT << 16; + sc->result = DID_RESET << 16; + } + else if (pScsiReply->SCSIState & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) { + /* Device Inq. data indicates that it supports + * QTags, but rejects QTag messages. + * This command completed OK. + * + * Not real sure here either so do nothing... */ } if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL) mptscsih_report_queue_full(sc, pScsiReply, pScsiReq); + /* Add handling of: + * Reservation Conflict, Busy, + * Command Terminated, CHECK + */ + + /* If regular Inquiry cmd - save inquiry data + */ + xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); + if ( sc->result == (DID_OK << 16) + && pScsiReq->CDB[0] == INQUIRY + && !(pScsiReq->CDB[1] & 0x3) + && xfer_cnt >= SCSI_STD_INQUIRY_BYTES + ) { + mptscsih_initTarget(hd, + hd->port, + sc->target, + pScsiReq->LUN[1], + sc->buffer, + xfer_cnt); + } + + if (hd->is_spi) + mptscsih_set_dvflags(hd, pScsiReq, sc->buffer); + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pScsiReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + /* Not real sure here either... 
*/ + sc->result = DID_RESET << 16; + } else + sc->result = DID_SOFT_ERROR << 16; break; case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */ @@ -395,50 +576,50 @@ case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */ case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ - case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ - case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */ default: /* - * What to do? + * What to do? */ sc->result = DID_SOFT_ERROR << 16; break; } /* switch(status) */ - dprintk((KERN_NOTICE MYNAM ": sc->result set to %08xh\n", sc->result)); + dprintk((KERN_NOTICE " sc->result set to %08xh\n", sc->result)); + } /* end of address reply case */ + + /* Unmap the DMA buffers, if any. */ + if (sc->use_sg) { + pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer, + sc->use_sg, scsi_to_pci_dma_dir(sc->sc_data_direction)); + } else if (sc->request_bufflen) { + scPrivate *my_priv; + + my_priv = (scPrivate *) &sc->SCp; + pci_unmap_single(ioc->pcidev, (dma_addr_t)(ulong)my_priv->p1, + sc->request_bufflen, + scsi_to_pci_dma_dir(sc->sc_data_direction)); } - if (sc != NULL) { - unsigned long flags; + hd->ScsiLookup[req_idx] = NULL; - /* Unmap the DMA buffers, if any. */ - if (sc->use_sg) { - pci_unmap_sg(ioc->pcidev, - (struct scatterlist *) sc->request_buffer, - sc->use_sg, - scsi_to_pci_dma_dir(sc->sc_data_direction)); - } else if (sc->request_bufflen) { - pci_unmap_single(ioc->pcidev, - (dma_addr_t)((long)sc->SCp.ptr), - sc->request_bufflen, - scsi_to_pci_dma_dir(sc->sc_data_direction)); - } + sc->host_scribble = NULL; /* CHECKME! - Do we need to clear this??? 
*/ - spin_lock_irqsave(sc->host->host_lock, flags); - sc->scsi_done(sc); - spin_unlock_irqrestore(sc->host->host_lock, flags); - } + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue the command callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + /* Free Chain buffers */ + mptscsih_freeChainBuffers(hd, req_idx); return 1; } -#ifndef MPT_SCSI_USE_NEW_EH +#ifndef MPT_SCSI_USE_NEW_EH /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* * search_taskQ - Search SCSI task mgmt request queue for specific - * request type + * request type. * @remove: (Boolean) Should request be removed if found? * @sc: Pointer to Scsi_Cmnd structure * @task_type: Task type to search for @@ -447,42 +628,55 @@ * was not found. */ static MPT_FRAME_HDR * -search_taskQ(int remove, Scsi_Cmnd *sc, u8 task_type) +search_taskQ(int remove, Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, u8 task_type) { MPT_FRAME_HDR *mf = NULL; unsigned long flags; int count = 0; int list_sz; - dslprintk((KERN_INFO MYNAM ": spinlock#1\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - list_sz = mpt_scsih_taskQ_cnt; - if (! Q_IS_EMPTY(&mpt_scsih_taskQ)) { - mf = mpt_scsih_taskQ.head; + dprintk((KERN_INFO MYNAM ": search_taskQ(%d,sc=%p,%d) called\n", + remove, sc, task_type)); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + list_sz = hd->taskQcnt; + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; do { count++; if (mf->u.frame.linkage.argp1 == sc && mf->u.frame.linkage.arg1 == task_type) { if (remove) { Q_DEL_ITEM(&mf->u.frame.linkage); - mpt_scsih_taskQ_cnt--; + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + + /* Don't save mf into nextmf because + * exit after command has been deleted. 
+ */ + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&hd->ioc->FreeQ, + &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; +#endif } break; } - } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&mpt_scsih_taskQ); - if (mf == (MPT_FRAME_HDR*)&mpt_scsih_taskQ) { + } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&hd->taskQ); + if (mf == (MPT_FRAME_HDR*)&hd->taskQ) { mf = NULL; } } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); if (list_sz) { - dprintk((KERN_INFO MYNAM ": search_taskQ(%d,%p,%d) results=%p (%sFOUND%s)!\n", - remove, sc, task_type, + dprintk((KERN_INFO " Results=%p (%sFOUND%s)!\n", mf, mf ? "" : "NOT_", (mf && remove) ? "+REMOVED" : "" )); - dprintk((KERN_INFO MYNAM ": (searched thru %d of %d items on taskQ)\n", + dprintk((KERN_INFO " (searched thru %d of %d items on taskQ)\n", count, list_sz )); } @@ -490,12 +684,336 @@ return mf; } +/* + * clean_taskQ - Clean the SCSI task mgmt request for + * this SCSI host instance. + * @hd: MPT_SCSI_HOST pointer + * + * Returns: None. + */ +static void +clean_taskQ(MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf = NULL; + MPT_FRAME_HDR *nextmf = NULL; + MPT_ADAPTER *ioc = hd->ioc; + unsigned long flags; + + dprintk((KERN_INFO MYNAM ": clean_taskQ called\n")); + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; + do { + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + + nextmf = mf->u.frame.linkage.forw; + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&ioc->FreeQ, &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; +#endif + } while ((mf = nextmf) != (MPT_FRAME_HDR*)&hd->taskQ); + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + return; +} + +/* + * search_taskQ_for_cmd - Search the SCSI task mgmt request queue for + * the specified command. 
If found, delete + * @hd: MPT_SCSI_HOST pointer + * + * Returns: None. + */ +static void +search_taskQ_for_cmd(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf = NULL; + unsigned long flags; + int count = 0; + + dprintk((KERN_INFO MYNAM ": search_taskQ_for_cmd(sc=%p) called\n", sc)); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (! Q_IS_EMPTY(&hd->taskQ)) { + mf = hd->taskQ.head; + do { + count++; + if (mf->u.frame.linkage.argp1 == sc) { + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + dprintk((KERN_INFO MYNAM + ": Cmd %p found! Deleting.\n", sc)); + + /* Don't save mf into nextmf because + * exit after command has been deleted. + */ + + /* Place the MF back on the FreeQ */ + Q_ADD_TAIL(&hd->ioc->FreeQ, + &mf->u.frame.linkage, + MPT_FRAME_HDR); +#ifdef MFCNT + hd->ioc->mfcnt--; #endif + break; + } + } while ((mf = mf->u.frame.linkage.forw) != (MPT_FRAME_HDR*)&hd->taskQ); + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + return; +} + +#endif /* } MPT_SCSI_USE_NEW_EH */ + + +/* + * Flush all commands on the doneQ. + * Lock Q when deleting/adding members + * Lock io_request_lock for OS callback. + */ +static void +flush_doneQ(MPT_SCSI_HOST *hd) +{ + MPT_DONE_Q *buffer; + Scsi_Cmnd *SCpnt; + unsigned long flags; + + /* Flush the doneQ. + */ + dprintk((KERN_INFO MYNAM ": flush_doneQ called\n")); + while (1) { + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (Q_IS_EMPTY(&hd->doneQ)) { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + break; + } + + buffer = hd->doneQ.head; + /* Delete from Q + */ + Q_DEL_ITEM(buffer); + + /* Set the Scsi_Cmnd pointer + */ + SCpnt = (Scsi_Cmnd *) buffer->argp; + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + + /* Do the OS callback. 
+ */ + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(SCpnt->host->host_lock, flags); + } + + return; +} + +/* + * Search the doneQ for a specific command. If found, delete from Q. + * Calling function will finish processing. + */ +static void +search_doneQ_for_cmd(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt) +{ + unsigned long flags; + MPT_DONE_Q *buffer; + + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->doneQ)) { + buffer = hd->doneQ.head; + do { + Scsi_Cmnd *sc = (Scsi_Cmnd *) buffer->argp; + if (SCpnt == sc) { + Q_DEL_ITEM(buffer); + SCpnt->result = sc->result; + + /* Set the Scsi_Cmnd pointer + */ + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + break; + } + } while ((buffer = buffer->forw) != (MPT_DONE_Q *) &hd->doneQ); + } + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + return; +} + +/* + * mptscsih_flush_running_cmds - For each command found, search + * Scsi_Host instance taskQ and reply to OS. + * Called only if recovering from a FW reload. + * @hd: Pointer to a SCSI HOST structure + * + * Returns: None. + * + * Must be called while new I/Os are being queued. + */ +static void +mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) +{ + Scsi_Cmnd *SCpnt = NULL; + MPT_FRAME_HDR *mf = NULL; + int ii; + int max = hd->ioc->req_depth; + unsigned long flags; + + dprintk((KERN_INFO MYNAM ": flush_ScsiLookup called\n")); + for (ii= 0; ii < max; ii++) { + if ((SCpnt = hd->ScsiLookup[ii]) != NULL) { + + /* Command found. + */ + +#ifndef MPT_SCSI_USE_NEW_EH + /* Search taskQ, if found, delete. + */ + search_taskQ_for_cmd(SCpnt, hd); +#endif + + /* Search pendingQ, if found, + * delete from Q. If found, do not decrement + * queue_depth, command never posted. 
+ */ + if (mptscsih_search_pendingQ(hd, ii) == NULL) + atomic_dec(&queue_depth); + + /* Null ScsiLookup index + */ + hd->ScsiLookup[ii] = NULL; + + mf = MPT_INDEX_2_MFPTR(hd->ioc, ii); + dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n", + mf, SCpnt)); + + /* Set status + * Do OS callback + * Free chain buffers + * Free message frame + */ + SCpnt->result = DID_RESET << 16; + SCpnt->host_scribble = NULL; + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); /* Issue the command callback */ + spin_unlock_irqrestore(SCpnt->host->host_lock, flags); + + /* Free Chain buffers */ + mptscsih_freeChainBuffers(hd, ii); + + /* Free Message frames */ + mpt_free_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); + } + } + return; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_initChainBuffers - Allocate memory for and initialize + * chain buffers, chain buffer control arrays and spinlock. + * @hd: Pointer to MPT_SCSI_HOST structure + * @init: If set, initialize the spin lock. 
+ */ +static int +mptscsih_initChainBuffers (MPT_SCSI_HOST *hd, int init) +{ + MPT_FRAME_HDR *chain; + u8 *mem; + unsigned long flags; + int sz, ii, numChain; + + + /* Chain buffer allocations + * Allocate and initialize tracker structures + */ + if (hd->ioc->req_sz <= 64) + numChain = MPT_SG_REQ_64_SCALE * hd->ioc->req_depth; + else if (hd->ioc->req_sz <= 96) + numChain = MPT_SG_REQ_96_SCALE * hd->ioc->req_depth; + else + numChain = MPT_SG_REQ_128_SCALE * hd->ioc->req_depth; + + sz = numChain * sizeof(int); + + if (hd->ReqToChain == NULL) { + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -1; + + hd->ReqToChain = (int *) mem; + } else { + mem = (u8 *) hd->ReqToChain; + } + memset(mem, 0xFF, sz); + + if (hd->ChainToChain == NULL) { + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + return -1; + + hd->ChainToChain = (int *) mem; + } else { + mem = (u8 *) hd->ChainToChain; + } + memset(mem, 0xFF, sz); + + if (hd->ChainBuffer == NULL) { + /* Allocate free chain buffer pool + */ + sz = numChain * hd->ioc->req_sz; + mem = pci_alloc_consistent(hd->ioc->pcidev, sz, &hd->ChainBufferDMA); + if (mem == NULL) + return -1; + + hd->ChainBuffer = (u8*)mem; + } else { + mem = (u8 *) hd->ChainBuffer; + } + memset(mem, 0, sz); + + dprintk((KERN_INFO " ChainBuffer @ %p(%p), sz=%d\n", + hd->ChainBuffer, (void *)(ulong)hd->ChainBufferDMA, sz)); + + /* Initialize the free chain Q. + */ + if (init) { + spin_lock_init(&hd->FreeChainQlock); + } + + spin_lock_irqsave (&hd->FreeChainQlock, flags); + Q_INIT(&hd->FreeChainQ, MPT_FRAME_HDR); + + /* Post the chain buffers to the FreeChainQ. + */ + mem = (u8 *)hd->ChainBuffer; + for (ii=0; ii < numChain; ii++) { + chain = (MPT_FRAME_HDR *) mem; + Q_ADD_TAIL(&hd->FreeChainQ.head, &chain->u.frame.linkage, MPT_FRAME_HDR); + mem += hd->ioc->req_sz; + } + spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + + return 0; +} +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - * Hack! 
I'd like to report if a device is returning QUEUE_FULL + * Hack! It might be nice to report if a device is returning QUEUE_FULL * but maybe not each and every time... */ static long last_queue_full = 0; @@ -518,8 +1036,12 @@ long time = jiffies; if (time - last_queue_full > 10 * HZ) { - printk(KERN_WARNING MYNAM ": Device reported QUEUE_FULL! SCSI bus:target:lun = %d:%d:%d\n", - 0, sc->target, sc->lun); + char *ioc_str = "ioc?"; + + if (sc->host && sc->host->hostdata) + ioc_str = ((MPT_SCSI_HOST *)sc->host->hostdata)->ioc->name; + printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n", + ioc_str, 0, sc->target, sc->lun); last_queue_full = time; } } @@ -527,7 +1049,7 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int BeenHereDoneThat = 0; -/* SCSI fops start here... */ +/* SCSI host fops start here... */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_detect - Register MPT adapter(s) as SCSI host(s) with @@ -546,25 +1068,22 @@ struct Scsi_Host *sh = NULL; MPT_SCSI_HOST *hd = NULL; MPT_ADAPTER *this; + MPT_DONE_Q *freedoneQ; unsigned long flags; - int sz; + int sz, ii; + int numSGE = 0; + int scale; u8 *mem; if (! 
BeenHereDoneThat++) { show_mptmod_ver(my_NAME, my_VERSION); - if ((ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER)) <= 0) { - printk(KERN_ERR MYNAM ": Failed to register callback1 with MPT base driver\n"); - return mpt_scsi_hosts; - } - if ((ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER)) <= 0) { - printk(KERN_ERR MYNAM ": Failed to register callback2 with MPT base driver\n"); - return mpt_scsi_hosts; - } + ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER); + ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER); + ScsiScanDvCtx = mpt_register(mptscsih_scandv_complete, MPTSCSIH_DRIVER); #ifndef MPT_SCSI_USE_NEW_EH - Q_INIT(&mpt_scsih_taskQ, MPT_FRAME_HDR); - spin_lock_init(&mpt_scsih_taskQ_lock); + spin_lock_init(&mytaskQ_lock); #endif if (mpt_event_register(ScsiDoneCtx, mptscsih_event_process) == 0) { @@ -579,106 +1098,263 @@ /* FIXME! */ } } - dprintk((KERN_INFO MYNAM ": mpt_scsih_detect()\n")); +#ifdef MODULE + /* Evaluate the command line arguments, if any */ + if (mptscsih) + mptscsih_setup(mptscsih); +#endif +#ifndef MPT_SCSI_USE_NEW_EH + atomic_set(&mpt_taskQdepth, 0); +#endif + this = mpt_adapter_find_first(); while (this != NULL) { - /* FIXME! Multi-port (aka FC929) support... - * for (i = 0; i < this->facts.NumberOfPorts; i++) - */ + int portnum; + for (portnum=0; portnum < this->facts.NumberOfPorts; portnum++) { - /* 20010215 -sralston - * Added sanity check on SCSI Initiator-mode enabled - * for this MPT adapter. - */ - if (!(this->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) { - printk(KERN_ERR MYNAM ": Skipping %s because SCSI Initiator mode is NOT enabled!\n", - this->name); - this = mpt_adapter_find_next(this); - continue; - } + /* 20010215 -sralston + * Added sanity check on SCSI Initiator-mode enabled + * for this MPT adapter. 
+ */ + if (!(this->pfacts[portnum].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) { + printk(MYIOC_s_WARN_FMT "Skipping because SCSI Initiator mode is NOT enabled!\n", + this->name); + continue; + } - /* 20010202 -sralston - * Added sanity check on readiness of the MPT adapter. - */ - if (this->last_state != MPI_IOC_STATE_OPERATIONAL) { - printk(KERN_ERR MYNAM ": ERROR - Skipping %s because it's not operational!\n", - this->name); - this = mpt_adapter_find_next(this); - continue; - } + /* 20010202 -sralston + * Added sanity check on readiness of the MPT adapter. + */ + if (this->last_state != MPI_IOC_STATE_OPERATIONAL) { + printk(MYIOC_s_WARN_FMT "Skipping because it's not operational!\n", + this->name); + continue; + } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) - tpnt->proc_dir = &proc_mpt_scsihost; + tpnt->proc_dir = &proc_mpt_scsihost; #endif - sh = scsi_register(tpnt, sizeof(MPT_SCSI_HOST)); - if (sh != NULL) { - save_flags(flags); - cli(); - sh->io_port = 0; - sh->n_io_port = 0; - sh->irq = 0; - - /* Yikes! This is important! - * Otherwise, by default, linux only scans target IDs 0-7! - * - * BUG FIX! 20010618 -sralston & pdelaney - * FC919 testing was encountering "duplicate" FC devices, - * as it turns out because the 919 was returning 512 - * for PortFacts.MaxDevices, causing a wraparound effect - * in SCSI IO requests. So instead of using: - * sh->max_id = this->pfacts[0].MaxDevices - 1 - * we'll use a definitive max here. - */ - sh->max_id = MPT_MAX_FC_DEVICES; - - sh->this_id = this->pfacts[0].PortSCSIID; - - restore_flags(flags); - - hd = (MPT_SCSI_HOST *) sh->hostdata; - hd->ioc = this; - hd->port = 0; /* FIXME! */ - - /* SCSI needs Scsi_Cmnd lookup table! - * (with size equal to req_depth*PtrSz!) 
- */ - sz = hd->ioc->req_depth * sizeof(void *); - mem = kmalloc(sz, GFP_KERNEL); - if (mem == NULL) - return mpt_scsi_hosts; - - memset(mem, 0, sz); - hd->ScsiLookup = (struct scsi_cmnd **) mem; - - dprintk((KERN_INFO MYNAM ": ScsiLookup @ %p, sz=%d\n", - hd->ScsiLookup, sz)); - - /* SCSI also needs SG buckets/hunk management! - * (with size equal to N * req_sz * req_depth!) - * (where N is number of SG buckets per hunk) - */ - sz = MPT_SG_BUCKETS_PER_HUNK * hd->ioc->req_sz * hd->ioc->req_depth; - mem = pci_alloc_consistent(hd->ioc->pcidev, sz, - &hd->SgHunksDMA); - if (mem == NULL) - return mpt_scsi_hosts; + sh = scsi_register(tpnt, sizeof(MPT_SCSI_HOST)); + if (sh != NULL) { + save_flags(flags); + cli(); + sh->io_port = 0; + sh->n_io_port = 0; + sh->irq = 0; + + /* Yikes! This is important! + * Otherwise, by default, linux + * only scans target IDs 0-7! + * pfactsN->MaxDevices unreliable + * (not supported in early + * versions of the FW). + * max_id = 1 + actual max id, + * max_lun = 1 + actual last lun, + * see hosts.h :o( + */ + if ((int)this->chip_type > (int)FC929) + sh->max_id = MPT_MAX_SCSI_DEVICES; + else { + /* For FC, increase the queue depth + * from MPT_SCSI_CAN_QUEUE (31) + * to MPT_FC_CAN_QUEUE (63). + */ + sh->can_queue = MPT_FC_CAN_QUEUE; + sh->max_id = MPT_MAX_FC_DEVICES<256 ? MPT_MAX_FC_DEVICES : 255; + } + sh->max_lun = MPT_LAST_LUN + 1; - memset(mem, 0, sz); - hd->SgHunks = (u8*)mem; + sh->this_id = this->pfacts[portnum].PortSCSIID; - dprintk((KERN_INFO MYNAM ": SgHunks @ %p(%08x), sz=%d\n", - hd->SgHunks, hd->SgHunksDMA, sz)); + /* OS entry to allow host drivers to force + * a queue depth on a per device basis. 
+ */ + sh->select_queue_depths = mptscsih_select_queue_depths; - hd->qtag_tick = jiffies; + /* Verify that we won't exceed the maximum + * number of chain buffers + * We can optimize: ZZ = req_sz/sizeof(MptSge_t) + * For 32bit SGE's: + * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ + * + (req_sz - 64)/sizeof(MptSge_t) + * A slightly different algorithm is required for + * 64bit SGEs. + */ + scale = this->req_sz/sizeof(MptSge_t); + if (sizeof(MptSge_t) == sizeof(SGESimple32_t)) { + numSGE = 1 + (scale - 1) * (this->facts.MaxChainDepth-1) + scale + + (this->req_sz - 64) / (sizeof(MptSge_t)); + } else if (sizeof(MptSge_t) == sizeof(SGESimple64_t)) { + numSGE = (scale - 1) * (this->facts.MaxChainDepth-1) + scale + + (this->req_sz - 60) / (sizeof(MptSge_t)); + } + + if (numSGE < sh->sg_tablesize) { + /* Reset this value */ + dprintk((MYIOC_s_INFO_FMT + "Resetting sg_tablesize to %d from %d\n", + this->name, numSGE, sh->sg_tablesize)); + sh->sg_tablesize = numSGE; + } + + restore_flags(flags); + + hd = (MPT_SCSI_HOST *) sh->hostdata; + hd->ioc = this; + + if ((int)this->chip_type > (int)FC929) + hd->is_spi = 1; + + if (DmpService && + (this->chip_type == FC919 || this->chip_type == FC929)) + hd->is_multipath = 1; + + hd->port = 0; /* FIXME! */ + + /* SCSI needs Scsi_Cmnd lookup table! + * (with size equal to req_depth*PtrSz!) + */ + sz = hd->ioc->req_depth * sizeof(void *); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0, sz); + hd->ScsiLookup = (struct scsi_cmnd **) mem; + + dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n", + this->name, hd->ScsiLookup, sz)); + + if (mptscsih_initChainBuffers(hd, 1) < 0) + goto done; + + /* Allocate memory for free and doneQ's + */ + sz = sh->can_queue * sizeof(MPT_DONE_Q); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0xFF, sz); + hd->memQ = mem; + + /* Initialize the free, done and pending Qs. 
+ */ + Q_INIT(&hd->freeQ, MPT_DONE_Q); + Q_INIT(&hd->doneQ, MPT_DONE_Q); + Q_INIT(&hd->pendingQ, MPT_DONE_Q); + spin_lock_init(&hd->freedoneQlock); + + mem = hd->memQ; + for (ii=0; ii < sh->can_queue; ii++) { + freedoneQ = (MPT_DONE_Q *) mem; + Q_ADD_TAIL(&hd->freeQ.head, freedoneQ, MPT_DONE_Q); + mem += sizeof(MPT_DONE_Q); + } + + /* Initialize this Scsi_Host + * internal task Q. + */ + Q_INIT(&hd->taskQ, MPT_FRAME_HDR); + hd->taskQcnt = 0; + + /* Allocate memory for the device structures. + * A non-Null pointer at an offset + * indicates a device exists. + * max_id = 1 + maximum id (hosts.h) + */ + sz = sh->max_id * sizeof(void *); + mem = kmalloc(sz, GFP_KERNEL); + if (mem == NULL) + goto done; + + memset(mem, 0, sz); + hd->Targets = (VirtDevice **) mem; + + dprintk((KERN_INFO " Targets @ %p, sz=%d\n", hd->Targets, sz)); + + + /* Clear the TM flags + */ + hd->tmPending = 0; + hd->resetPending = 0; + hd->abortSCpnt = NULL; + hd->tmPtr = NULL; + hd->numTMrequests = 0; + + /* Clear the pointer used to store + * single-threaded commands, i.e., those + * issued during a bus scan, dv and + * configuration pages. + */ + hd->cmdPtr = NULL; + + /* Attach the SCSI Host to the IOC structure + */ + this->sh = sh; + + /* Initialize this SCSI Hosts' timers + * To use, set the timer expires field + * and add_timer + */ + init_timer(&hd->timer); + hd->timer.data = (unsigned long) hd; + hd->timer.function = mptscsih_timer_expired; + + init_timer(&hd->TMtimer); + hd->TMtimer.data = (unsigned long) hd; + hd->TMtimer.function = mptscsih_taskmgmt_timeout; + hd->qtag_tick = jiffies; + + /* Moved Earlier Pam D */ + /* this->sh = sh; */ + + if (hd->is_spi) { + /* Update with the driver setup + * values. 
+ */ + if (hd->ioc->spi_data.maxBusWidth > driver_setup.max_width) + hd->ioc->spi_data.maxBusWidth = driver_setup.max_width; + if (hd->ioc->spi_data.minSyncFactor < driver_setup.min_sync_fac) + hd->ioc->spi_data.minSyncFactor = driver_setup.min_sync_fac; + + if (hd->ioc->spi_data.minSyncFactor == MPT_ASYNC) + hd->ioc->spi_data.maxSyncOffset = 0; + + hd->negoNvram = 0; +#ifdef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + hd->negoNvram = MPT_SCSICFG_USE_NVRAM; +#endif + if (driver_setup.dv == 0) + hd->negoNvram = MPT_SCSICFG_USE_NVRAM; + + hd->ioc->spi_data.forceDv = 0; + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) + hd->ioc->spi_data.dvStatus[ii] = MPT_SCSICFG_NEGOTIATE; + + + ddvprintk((MYIOC_s_INFO_FMT + "dv %x width %x factor %x \n", + hd->ioc->name, driver_setup.dv, + driver_setup.max_width, + driver_setup.min_sync_fac)); + + } + + mpt_scsi_hosts++; + } + + } /* for each adapter port */ - this->sh = sh; - mpt_scsi_hosts++; - } this = mpt_adapter_find_next(this); } +done: + if (mpt_scsi_hosts > 0) + register_reboot_notifier(&mptscsih_notifier); + return mpt_scsi_hosts; } @@ -699,63 +1375,156 @@ mptscsih_release(struct Scsi_Host *host) { MPT_SCSI_HOST *hd; -#ifndef MPT_SCSI_USE_NEW_EH + int count; unsigned long flags; - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - if (mpt_scsih_taskQ_bh_active) { - int count = 10 * HZ; + hd = (MPT_SCSI_HOST *) host->hostdata; - dprintk((KERN_INFO MYNAM ": Info: Zapping TaskMgmt thread!\n")); +#ifndef MPT_SCSI_USE_NEW_EH +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_release = 1; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); +#endif - /* Zap the taskQ! 
*/ - Q_INIT(&mpt_scsih_taskQ, MPT_FRAME_HDR); - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_lock_irqsave(&mytaskQ_lock, flags); + if (mytaskQ_bh_active) { + count = 10 * HZ; - while(mpt_scsih_taskQ_bh_active && --count) { - current->state = TASK_INTERRUPTIBLE; + spin_unlock_irqrestore(&mytaskQ_lock, flags); + dprintk((KERN_INFO MYNAM ": Info: Zapping TaskMgmt thread!\n")); + clean_taskQ(hd); + + while(mytaskQ_bh_active && --count) { + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); } if (!count) - printk(KERN_ERR MYNAM ": ERROR! TaskMgmt thread still active!\n"); + printk(KERN_ERR MYNAM ": ERROR - TaskMgmt thread still active!\n"); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&mytaskQ_lock, flags); #endif - hd = (MPT_SCSI_HOST *) host->hostdata; +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + /* Check DV thread active */ + count = 10 * HZ; + spin_lock_irqsave(&dvtaskQ_lock, flags); + while(dvtaskQ_active && --count) { + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + spin_lock_irqsave(&dvtaskQ_lock, flags); + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + if (!count) + printk(KERN_ERR MYNAM ": ERROR - DV thread still active!\n"); +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) + else + printk(KERN_ERR MYNAM ": DV thread orig %d, count %d\n", 10 * HZ, count); +#endif +#endif + + unregister_reboot_notifier(&mptscsih_notifier); + if (hd != NULL) { - int sz1, sz2; + int sz1, sz2, sz3, sztarget=0; + int szchain = 0; + int szQ = 0; + int scale; + + /* Synchronize disk caches + */ + (void) mptscsih_synchronize_cache(hd, 0); + + sz1 = sz2 = sz3 = 0; + + if (hd->ioc->req_sz <= 64) + scale = MPT_SG_REQ_64_SCALE; + else if (hd->ioc->req_sz <= 96) + scale = MPT_SG_REQ_96_SCALE; + else + scale = MPT_SG_REQ_128_SCALE; - sz1 = sz2 = 0; if (hd->ScsiLookup != NULL) { sz1 = hd->ioc->req_depth * sizeof(void *); kfree(hd->ScsiLookup); 
hd->ScsiLookup = NULL; } - if (hd->SgHunks != NULL) { + if (hd->ReqToChain != NULL) { + szchain += scale * hd->ioc->req_depth * sizeof(int); + kfree(hd->ReqToChain); + hd->ReqToChain = NULL; + } + + if (hd->ChainToChain != NULL) { + szchain += scale * hd->ioc->req_depth * sizeof(int); + kfree(hd->ChainToChain); + hd->ChainToChain = NULL; + } + + if (hd->ChainBuffer != NULL) { + sz2 = scale * hd->ioc->req_depth * hd->ioc->req_sz; + szchain += sz2; - sz2 = MPT_SG_BUCKETS_PER_HUNK * hd->ioc->req_sz * hd->ioc->req_depth; pci_free_consistent(hd->ioc->pcidev, sz2, - hd->SgHunks, hd->SgHunksDMA); - hd->SgHunks = NULL; + hd->ChainBuffer, hd->ChainBufferDMA); + hd->ChainBuffer = NULL; + } + + if (hd->memQ != NULL) { + szQ = host->can_queue * sizeof(MPT_DONE_Q); + kfree(hd->memQ); + hd->memQ = NULL; } - dprintk((KERN_INFO MYNAM ": Free'd ScsiLookup (%d) and SgHunks (%d) memory\n", sz1, sz2)); + + if (hd->Targets != NULL) { + int max, ii; + + /* + * Free any target structures that were allocated. + */ + if (hd->is_spi) { + max = MPT_MAX_SCSI_DEVICES; + } else { + max = MPT_MAX_FC_DEVICES; + } + for (ii=0; ii < max; ii++) { + if (hd->Targets[ii]) { + kfree(hd->Targets[ii]); + hd->Targets[ii] = NULL; + sztarget += sizeof(VirtDevice); + } + } + + /* + * Free pointer array. 
+ */ + sz3 = max * sizeof(void *); + kfree(hd->Targets); + hd->Targets = NULL; + } + + dprintk((MYIOC_s_INFO_FMT "Free'd ScsiLookup (%d), chain (%d) and Target (%d+%d) memory\n", + hd->ioc->name, sz1, szchain, sz3, sztarget)); + dprintk(("Free'd done and free Q (%d) memory\n", szQ)); } + /* NULL the Scsi_Host pointer + */ + hd->ioc->sh = NULL; + scsi_unregister(host); if (mpt_scsi_hosts) { if (--mpt_scsi_hosts == 0) { -#if 0 - mptscsih_flush_pending(); -#endif mpt_reset_deregister(ScsiDoneCtx); dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n")); mpt_event_deregister(ScsiDoneCtx); dprintk((KERN_INFO MYNAM ": Deregistered for IOC event notifications\n")); - mpt_deregister(ScsiDoneCtx); + mpt_deregister(ScsiScanDvCtx); mpt_deregister(ScsiTaskCtx); + mpt_deregister(ScsiDoneCtx); if (info_kbuf != NULL) kfree(info_kbuf); @@ -767,6 +1536,45 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** + * mptscsih_halt - Process the reboot notification + * @nb: Pointer to a struct notifier_block (ignored) + * @event: event (SYS_HALT, SYS_RESTART, SYS_POWER_OFF) + * @buf: Pointer to a data buffer (ignored) + * + * This routine called if a system shutdown or reboot is to occur. + * + * Return NOTIFY_DONE if this is something other than a reboot message. + * NOTIFY_OK if this is a reboot message. 
+ */ +static int +mptscsih_halt(struct notifier_block *nb, ulong event, void *buf) +{ + MPT_ADAPTER *ioc = NULL; + MPT_SCSI_HOST *hd = NULL; + + /* Ignore all messages other than reboot message + */ + if ((event != SYS_RESTART) && (event != SYS_HALT) + && (event != SYS_POWER_OFF)) + return (NOTIFY_DONE); + + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + /* Flush the cache of this adapter + */ + if (ioc->sh) { + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd) { + mptscsih_synchronize_cache(hd, 0); + } + } + } + + unregister_reboot_notifier(&mptscsih_notifier); + return NOTIFY_OK; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** * mptscsih_info - Return information about MPT adapter * @SChost: Pointer to Scsi_Host structure * @@ -794,14 +1602,6 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int max_qd = 1; -#ifdef MPT_DEBUG - static int max_sges = 0; - static int max_xfer = 0; -#endif -#if 0 - static int max_num_sges = 0; - static int max_sgent_len = 0; -#endif #if 0 static int index_log[128]; static int index_ent = 0; @@ -814,6 +1614,47 @@ #else #define ADD_INDEX_LOG(req_ent) do { } while(0) #endif + +#ifdef DROP_TEST +#define DROP_IOC 1 /* IOC to force failures */ +#define DROP_TARGET 3 /* Target ID to force failures */ +#define DROP_THIS_CMD 10000 /* iteration to drop command */ +static int dropCounter = 0; +static int dropTestOK = 0; /* num did good */ +static int dropTestBad = 0; /* num did bad */ +static int dropTestNum = 0; /* total = good + bad + incomplete */ +static int numTotCmds = 0; +static MPT_FRAME_HDR *dropMfPtr = NULL; +static int numTMrequested = 0; +#endif + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_put_msgframe - Wrapper routine to post message frame to F/W. 
+ * @context: Call back context (ScsiDoneCtx, ScsiScanDvCtx) + * @id: IOC id number + * @mf: Pointer to message frame + * + * Handles the call to mptbase for posting request and queue depth + * tracking. + * + * Returns none. + */ +static void +mptscsih_put_msgframe(int context, int id, MPT_FRAME_HDR *mf) +{ + /* Main banana... */ + atomic_inc(&queue_depth); + if (atomic_read(&queue_depth) > max_qd) { + max_qd = atomic_read(&queue_depth); + dprintk((KERN_INFO MYNAM ": Queue depth now %d.\n", max_qd)); + } + + mpt_put_msg_frame(context, id, mf); + + return; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine. @@ -829,154 +1670,96 @@ int mptscsih_qcmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { - struct Scsi_Host *host; MPT_SCSI_HOST *hd; MPT_FRAME_HDR *mf; SCSIIORequest_t *pScsiReq; + VirtDevice *pTarget; + MPT_DONE_Q *buffer = NULL; + unsigned long flags; + int target; + int lun; int datadir; - u32 len; - u32 sgdir; + u32 datalen; u32 scsictl; u32 scsidir; u32 qtag; - u32 *mptr; - int sge_spill1; - int frm_sz; - int sges_left; - u32 chain_offset; + u32 cmd_len; int my_idx; - int i; - - dmfprintk((KERN_INFO MYNAM "_qcmd: SCpnt=%p, done()=%p\n", - SCpnt, done)); + int ii; + int rc; + int did_errcode; + int issueCmd; - host = SCpnt->host; - hd = (MPT_SCSI_HOST *) host->hostdata; - -#if 0 - if (host->host_busy >= 60) { - MPT_ADAPTER *ioc = hd->ioc; - u16 pci_command, pci_status; - - /* The IOC is probably hung, investigate status. */ - printk("MPI: IOC probably hung IOCSTAT[%08x] INTSTAT[%08x] REPLYFIFO[%08x]\n", - readl(&ioc->chip.fc9xx->DoorbellValue), - readl(&ioc->chip.fc9xx->IntStatus), - readl(&ioc->chip.fc9xx->ReplyFifo)); - pci_read_config_word(ioc->pcidev, PCI_COMMAND, &pci_command); - pci_read_config_word(ioc->pcidev, PCI_STATUS, &pci_status); - printk("MPI: PCI command[%04x] status[%04x]\n", pci_command, pci_status); - { - /* DUMP req index logger. 
*/ - int begin, end; + did_errcode = 0; + hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + target = SCpnt->target; + lun = SCpnt->lun; + SCpnt->scsi_done = done; - begin = (index_ent - 65) & (128 - 1); - end = index_ent & (128 - 1); - printk("MPI: REQ_INDEX_HIST["); - while (begin != end) { - printk("(%04x)", index_log[begin]); - begin = (begin + 1) & (128 - 1); - } - printk("\n"); - } - sti(); - while(1) - barrier(); - } -#endif + pTarget = hd->Targets[target]; - SCpnt->scsi_done = done; + dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n", + (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done)); /* 20000617 -sralston * GRRRRR... Shouldn't have to do this but... * Do explicit check for REQUEST_SENSE and cached SenseData. * If yes, return cached SenseData. */ -#ifdef MPT_SCSI_CACHE_AUTOSENSE - { - MPT_SCSI_DEV *mpt_sdev; - - mpt_sdev = (MPT_SCSI_DEV *) SCpnt->device->hostdata; - if (mpt_sdev && SCpnt->cmnd[0] == REQUEST_SENSE) { - u8 *dest = NULL; - - if (!SCpnt->use_sg) + if (SCpnt->cmnd[0] == REQUEST_SENSE) { + u8 *dest = NULL; + int sz; + + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_VALID_SENSE)) { + pTarget->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; //sjr-moved-here + if (!SCpnt->use_sg) { dest = SCpnt->request_buffer; - else { + } else { struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; if (sg) - dest = (u8 *) (unsigned long)sg_dma_address(sg); + dest = (u8 *)(ulong)sg_dma_address(sg); } - if (dest && mpt_sdev->sense_sz) { - memcpy(dest, mpt_sdev->CachedSense.data, mpt_sdev->sense_sz); -#ifdef MPT_DEBUG - { - int i; - u8 *sb; - - sb = mpt_sdev->CachedSense.data; - if (sb && ((sb[0] & 0x70) == 0x70)) { - printk(KERN_WARNING MYNAM ": Returning last cached SCSI (hex) SenseData:\n"); - printk(KERN_WARNING " "); - for (i = 0; i < (8 + sb[7]); i++) - printk("%s%02x", i == 13 ? 
"-" : " ", sb[i]); - printk("\n"); - } - } + if (dest) { + sz = MIN (SCSI_STD_SENSE_BYTES, SCpnt->request_bufflen); + memcpy(dest, pTarget->sense, sz); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0) + SCpnt->resid = SCpnt->request_bufflen - sz; #endif + SCpnt->result = 0; + SCpnt->scsi_done(SCpnt); + + //sjr-moved-up//pTarget->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; + + return 0; } - SCpnt->resid = SCpnt->request_bufflen - mpt_sdev->sense_sz; - SCpnt->result = 0; -/* spin_lock(SCpnt->host->host_lock); */ - SCpnt->scsi_done(SCpnt); -/* spin_unlock(SCpnt->host->host_lock); */ - return 0; } } -#endif - - if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); -/* return 1; */ - return 0; - } - pScsiReq = (SCSIIORequest_t *) mf; - my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - - ADD_INDEX_LOG(my_idx); - - /* Map the data portion, if any. */ - sges_left = SCpnt->use_sg; - if (sges_left) { - sges_left = pci_map_sg(hd->ioc->pcidev, - (struct scatterlist *) SCpnt->request_buffer, - sges_left, - scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); - } else if (SCpnt->request_bufflen) { - dma_addr_t buf_dma_addr; - - buf_dma_addr = pci_map_single(hd->ioc->pcidev, - SCpnt->request_buffer, - SCpnt->request_bufflen, - scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); - - /* We hide it here for later unmap. */ - SCpnt->SCp.ptr = (char *)(unsigned long) buf_dma_addr; + if (hd->resetPending) { + /* Prevent new commands from being issued + * while reloading the FW. + */ + did_errcode = 1; + goto did_error; } /* * Put together a MPT SCSI request... 
*/ + if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n", + hd->ioc->name)); + did_errcode = 2; + goto did_error; + } - /* Assume SimpleQ, NO DATA XFER for now */ + pScsiReq = (SCSIIORequest_t *) mf; - len = SCpnt->request_bufflen; - sgdir = 0x00000000; /* SGL IN (host<--ioc) */ - scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; + my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + + ADD_INDEX_LOG(my_idx); /* * The scsi layer should be handling this stuff @@ -985,25 +1768,27 @@ /* BUG FIX! 19991030 -sralston * TUR's being issued with scsictl=0x02000000 (DATA_IN)! - * Seems we may receive a buffer (len>0) even when there + * Seems we may receive a buffer (datalen>0) even when there * will be no data transfer! GRRRRR... */ datadir = mptscsih_io_direction(SCpnt); if (datadir < 0) { + datalen = SCpnt->request_bufflen; scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */ } else if (datadir > 0) { - sgdir = 0x04000000; /* SGL OUT (host-->ioc) */ + datalen = SCpnt->request_bufflen; scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */ } else { - len = 0; + datalen = 0; + scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER; } - qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; - - /* - * Attach tags to the devices + /* Default to untagged. Once a target structure has been allocated, + * use the Inquiry data to determine if device supports tagged. */ - if (SCpnt->device->tagged_supported) { + qtag = MPI_SCSIIO_CONTROL_UNTAGGED; + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES) + && (SCpnt->device->tagged_supported)) { /* * Some drives are too stupid to handle fairness issues * with tagged queueing. We throw in the odd ordered @@ -1012,87 +1797,24 @@ if ((jiffies - hd->qtag_tick) > (5*HZ)) { qtag = MPI_SCSIIO_CONTROL_ORDEREDQ; hd->qtag_tick = jiffies; - -#if 0 - /* These are ALWAYS zero! 
- * (Because this is a place for the device driver to dynamically - * assign tag numbers any way it sees fit. That's why -DaveM) - */ - dprintk((KERN_DEBUG MYNAM ": sc->device->current_tag = %08x\n", - SCpnt->device->current_tag)); - dprintk((KERN_DEBUG MYNAM ": sc->tag = %08x\n", - SCpnt->tag)); -#endif } -#if 0 - else { - /* Hmmm... I always see value of 0 here, - * of which {HEAD_OF, ORDERED, SIMPLE} are NOT! -sralston - * (Because this is a place for the device driver to dynamically - * assign tag numbers any way it sees fit. That's why -DaveM) - * - * if (SCpnt->tag == HEAD_OF_QUEUE_TAG) - */ - if (SCpnt->device->current_tag == HEAD_OF_QUEUE_TAG) - qtag = MPI_SCSIIO_CONTROL_HEADOFQ; - else if (SCpnt->tag == ORDERED_QUEUE_TAG) - qtag = MPI_SCSIIO_CONTROL_ORDEREDQ; - } -#endif + else + qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; } - scsictl = scsidir | qtag; - frm_sz = hd->ioc->req_sz; - - /* Ack! - * sge_spill1 = 9; + /* Use the above information to set up the message frame */ - sge_spill1 = (frm_sz - (sizeof(SCSIIORequest_t) - sizeof(SGEIOUnion_t) + sizeof(SGEChain32_t))) / 8; - /* spill1: for req_sz == 128 (128-48==80, 80/8==10 SGEs max, first time!), --> use 9 - * spill1: for req_sz == 96 ( 96-48==48, 48/8== 6 SGEs max, first time!), --> use 5 - */ - dsgprintk((KERN_INFO MYNAM ": SG: %x spill1 = %d\n", - my_idx, sge_spill1)); - -#ifdef MPT_DEBUG - if (sges_left > max_sges) { - max_sges = sges_left; - dprintk((KERN_INFO MYNAM ": MPT_MaxSges = %d\n", max_sges)); - } -#endif -#if 0 - if (sges_left > max_num_sges) { - max_num_sges = sges_left; - printk(KERN_INFO MYNAM ": MPT_MaxNumSges = %d\n", max_num_sges); - } -#endif - - dsgprintk((KERN_INFO MYNAM ": SG: %x sges_left = %d (initially)\n", - my_idx, sges_left)); - - chain_offset = 0; - if (sges_left > (sge_spill1+1)) { -#if 0 - chain_offset = 0x1E; -#endif - chain_offset = (frm_sz - 8) / 4; - } - - pScsiReq->TargetID = SCpnt->target; + pScsiReq->TargetID = target; pScsiReq->Bus = hd->port; - pScsiReq->ChainOffset = 
chain_offset; + pScsiReq->ChainOffset = 0; pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; pScsiReq->CDBLength = SCpnt->cmd_len; - -/* We have 256 bytes alloc'd per IO; let's use it. */ -/* pScsiReq->SenseBufferLength = SNS_LEN(SCpnt); */ - pScsiReq->SenseBufferLength = 255; - + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; pScsiReq->Reserved = 0; - pScsiReq->MsgFlags = 0; + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; pScsiReq->LUN[0] = 0; - pScsiReq->LUN[1] = SCpnt->lun; + pScsiReq->LUN[1] = lun; pScsiReq->LUN[2] = 0; pScsiReq->LUN[3] = 0; pScsiReq->LUN[4] = 0; @@ -1104,223 +1826,780 @@ /* * Write SCSI CDB into the message */ - for (i = 0; i < 12; i++) - pScsiReq->CDB[i] = SCpnt->cmnd[i]; - for (i = 12; i < 16; i++) - pScsiReq->CDB[i] = 0; + cmd_len = SCpnt->cmd_len; + for (ii=0; ii < cmd_len; ii++) + pScsiReq->CDB[ii] = SCpnt->cmnd[ii]; + for (ii=cmd_len; ii < 16; ii++) + pScsiReq->CDB[ii] = 0; /* DataLength */ - pScsiReq->DataLength = cpu_to_le32(len); + pScsiReq->DataLength = cpu_to_le32(datalen); /* SenseBuffer low address */ - pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_pool_dma + (my_idx * 256)); + pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma + + (my_idx * MPT_SENSE_BUFFER_ALLOC)); + + /* Now add the SG list + * Always have a SGE even if null length. + */ + rc = SUCCESS; + if (datalen == 0) { + /* Add a NULL SGE */ + mptscsih_AddNullSGE(pScsiReq); + } else { + /* Add a 32 or 64 bit SGE */ + rc = mptscsih_Add32BitSGE(hd, SCpnt, pScsiReq, my_idx); + } - mptr = (u32 *) &pScsiReq->SGL; - /* - * Now fill in the SGList... - * NOTES: For 128 byte req_sz, we can hold up to 10 simple SGE's - * in the remaining request frame. We -could- do unlimited chains - * but each chain buffer can only be req_sz bytes in size, and - * we lose one SGE whenever we chain. - * For 128 req_sz, we can hold up to 16 SGE's per chain buffer. 
- * For practical reasons, limit ourselves to 1 overflow chain buffer; - * giving us 9 + 16 == 25 SGE's max. - * At 4 Kb per SGE, that yields 100 Kb max transfer. - * - * (This code needs to be completely changed when/if 64-bit DMA - * addressing is used, since we will be able to fit much less than - * 10 embedded SG entries. -DaveM) - */ - if (sges_left) { - struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; - u32 v1, v2; - int sge_spill2; - int sge_cur_spill; - int sgCnt; - u8 *pSgBucket; - int chain_sz; - - len = 0; - - /* sge_spill2 = 15; - * spill2: for req_sz == 128 (128/8==16 SGEs max, first time!), --> use 15 - * spill2: for req_sz == 96 ( 96/8==12 SGEs max, first time!), --> use 11 - */ - sge_spill2 = frm_sz / 8 - 1; - dsgprintk((KERN_INFO MYNAM ": SG: %x spill2 = %d\n", - my_idx, sge_spill2)); - - pSgBucket = NULL; - sgCnt = 0; - sge_cur_spill = sge_spill1; - while (sges_left) { -#if 0 - if (sg_dma_len(sg) > max_sgent_len) { - max_sgent_len = sg_dma_len(sg); - printk(KERN_INFO MYNAM ": MPT_MaxSgentLen = %d\n", max_sgent_len); - } -#endif - /* Write one simple SGE */ - v1 = sgdir | 0x10000000 | sg_dma_len(sg); - len += sg_dma_len(sg); - v2 = sg_dma_address(sg); - dsgprintk((KERN_INFO MYNAM ": SG: %x Writing SGE @%p: %08x %08x, sges_left=%d\n", - my_idx, mptr, v1, v2, sges_left)); - *mptr++ = cpu_to_le32(v1); - *mptr++ = cpu_to_le32(v2); - sg++; - sgCnt++; - - if (--sges_left == 0) { - /* re-write 1st word of previous SGE with SIMPLE, - * LE, EOB, and EOL bits! - */ - v1 = 0xD1000000 | sgdir | sg_dma_len(sg-1); - dsgprintk((KERN_INFO MYNAM ": SG: %x (re)Writing SGE @%p: %08x (VERY LAST SGE!)\n", - my_idx, mptr-2, v1)); - *(mptr - 2) = cpu_to_le32(v1); - } else { - if ((sges_left > 1) && ((sgCnt % sge_cur_spill) == 0)) { - dsgprintk((KERN_INFO MYNAM ": SG: %x SG spill at modulo 0!\n", - my_idx)); - - /* Fixup previous SGE with LE bit! 
*/ - v1 = sgdir | 0x90000000 | sg_dma_len(sg-1); - dsgprintk((KERN_INFO MYNAM ": SG: %x (re)Writing SGE @%p: %08x (LAST BUCKET SGE!)\n", - my_idx, mptr-2, v1)); - *(mptr - 2) = cpu_to_le32(v1); - - chain_offset = 0; - /* Going to need another chain? */ - if (sges_left > (sge_spill2+1)) { -#if 0 - chain_offset = 0x1E; + if (rc == SUCCESS) { + hd->ScsiLookup[my_idx] = SCpnt; + SCpnt->host_scribble = NULL; + +#ifdef DROP_TEST + numTotCmds++; + /* If the IOC number and target match, increment + * counter. If counter matches DROP_THIS, do not + * issue command to FW to force a reset. + * Save the MF pointer so we can free resources + * when task mgmt completes. + */ + if ((hd->ioc->id == DROP_IOC) && (target == DROP_TARGET)) { + dropCounter++; + + if (dropCounter == DROP_THIS_CMD) { + dropCounter = 0; + + /* If global is set, then we are already + * doing something - so keep issuing commands. + */ + if (dropMfPtr == NULL) { + dropTestNum++; + dropMfPtr = mf; + atomic_inc(&queue_depth); + printk(MYIOC_s_INFO_FMT + "Dropped SCSI cmd (%p)\n", + hd->ioc->name, SCpnt); + printk("mf (%p) req (%4x) tot cmds (%d)\n", + mf, my_idx, numTotCmds); + + return 0; + } + } + } #endif - chain_offset = (frm_sz - 8) / 4; - chain_sz = frm_sz; - } else { - chain_sz = sges_left * 8; - } - /* write chain SGE at mptr. 
*/ - v1 = 0x30000000 | chain_offset<<16 | chain_sz; - if (pSgBucket == NULL) { - pSgBucket = hd->SgHunks - + (my_idx * frm_sz * MPT_SG_BUCKETS_PER_HUNK); - } else { - pSgBucket += frm_sz; + /* SCSI specific processing */ + issueCmd = 1; + if (hd->is_spi) { + int dvStatus = hd->ioc->spi_data.dvStatus[target]; + + if (dvStatus || hd->ioc->spi_data.forceDv) { + + /* Write SDP1 on 1st I/O to this target */ + if (dvStatus & MPT_SCSICFG_NEGOTIATE) { + mptscsih_writeSDP1(hd, 0, target, hd->negoNvram); + dvStatus &= ~MPT_SCSICFG_NEGOTIATE; + hd->ioc->spi_data.dvStatus[target] = dvStatus; + } + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + if ((dvStatus & MPT_SCSICFG_NEED_DV) || hd->ioc->spi_data.forceDv) { + unsigned long lflags; + /* Schedule DV if necessary */ + spin_lock_irqsave(&dvtaskQ_lock, lflags); + if (!dvtaskQ_active) { + dvtaskQ_active = 1; + mptscsih_dvTask.sync = 0; + mptscsih_dvTask.routine = mptscsih_domainValidation; + mptscsih_dvTask.data = (void *) hd; + + SCHEDULE_TASK(&mptscsih_dvTask); } - v2 = (hd->SgHunksDMA + - ((u8 *)pSgBucket - (u8 *)hd->SgHunks)); - dsgprintk((KERN_INFO MYNAM ": SG: %x Writing SGE @%p: %08x %08x (CHAIN!)\n", - my_idx, mptr, v1, v2)); - *(mptr++) = cpu_to_le32(v1); - *(mptr) = cpu_to_le32(v2); + hd->ioc->spi_data.forceDv = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, lflags); + } - mptr = (u32 *) pSgBucket; - sgCnt = 0; - sge_cur_spill = sge_spill2; + /* Trying to do DV to this target, extend timeout. 
+ * Wait to issue intil flag is clear + */ + if (dvStatus & MPT_SCSICFG_DV_PENDING) { + mod_timer(&SCpnt->eh_timeout, jiffies + 40 * HZ); + issueCmd = 0; } +#endif + } + } + + if (issueCmd) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p)\n", + hd->ioc->name, SCpnt)); + } else { + ddvtprintk((MYIOC_s_INFO_FMT "Pending SCSI cmd (%p)\n", + hd->ioc->name, SCpnt)); + /* Place this command on the pendingQ if possible */ + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->freeQ)) { + buffer = hd->freeQ.head; + Q_DEL_ITEM(buffer); + + /* Save the mf pointer + */ + buffer->argp = (void *)mf; + + /* Add to the pendingQ + */ + Q_ADD_TAIL(&hd->pendingQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } else { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + SCpnt->result = (DID_BUS_BUSY << 16); + SCpnt->scsi_done(SCpnt); } } } else { - dsgprintk((KERN_INFO MYNAM ": SG: non-SG for %p, len=%d\n", - SCpnt, SCpnt->request_bufflen)); + mptscsih_freeChainBuffers(hd, my_idx); + mpt_free_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); + did_errcode = 3; + goto did_error; + } + + return 0; + +did_error: + dprintk((MYIOC_s_WARN_FMT "_qcmd did_errcode=%d (sc=%p)\n", + hd->ioc->name, did_errcode, SCpnt)); + /* Just wish OS to issue a retry */ + SCpnt->result = (DID_BUS_BUSY << 16); + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->freeQ)) { + buffer = hd->freeQ.head; + Q_DEL_ITEM(buffer); + + /* Set the Scsi_Cmnd pointer + */ + buffer->argp = (void *)SCpnt; + + /* Add to the doneQ + */ + Q_ADD_TAIL(&hd->doneQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } else { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + SCpnt->scsi_done(SCpnt); + } - if (len > 0) { - dma_addr_t buf_dma_addr; + return 0; +} - buf_dma_addr = (dma_addr_t) (unsigned long)SCpnt->SCp.ptr; - *(mptr++) = 
cpu_to_le32(0xD1000000|sgdir|SCpnt->request_bufflen); - *(mptr++) = cpu_to_le32(buf_dma_addr); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_Add32BitSGE - Add a 32Bit SGE (plus chain buffers) to the + * SCSIIORequest_t Message Frame. + * @hd: Pointer to MPT_SCSI_HOST structure + * @SCpnt: Pointer to Scsi_Cmnd structure + * @pReq: Pointer to SCSIIORequest_t structure + * + * Returns ... + */ +static int +mptscsih_Add32BitSGE(MPT_SCSI_HOST *hd, Scsi_Cmnd *SCpnt, + SCSIIORequest_t *pReq, int req_idx) +{ + MptSge_t *psge; + MptChain_t *chainSge; + struct scatterlist *sg; + int frm_sz; + int sges_left, sg_done; + int chain_idx = MPT_HOST_NO_CHAIN; + int sgeOffset; + int numSgeSlots, numSgeThisFrame; + u32 sgflags, sgdir, len, thisxfer = 0; + int offset; + int newIndex; + int ii; + dma_addr_t v2; + + sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; + if (sgdir == MPI_SCSIIO_CONTROL_WRITE) { + sgdir = MPT_TRANSFER_HOST_TO_IOC; + } else { + sgdir = MPT_TRANSFER_IOC_TO_HOST; + } + + psge = (MptSge_t *) &pReq->SGL; + frm_sz = hd->ioc->req_sz; + + /* Map the data portion, if any. + * sges_left = 0 if no data transfer. + */ + sges_left = SCpnt->use_sg; + if (SCpnt->use_sg) { + sges_left = pci_map_sg(hd->ioc->pcidev, + (struct scatterlist *) SCpnt->request_buffer, + SCpnt->use_sg, + scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); + } else if (SCpnt->request_bufflen) { + dma_addr_t buf_dma_addr; + scPrivate *my_priv; + + buf_dma_addr = pci_map_single(hd->ioc->pcidev, + SCpnt->request_buffer, + SCpnt->request_bufflen, + scsi_to_pci_dma_dir(SCpnt->sc_data_direction)); + + /* We hide it here for later unmap. 
*/ + my_priv = (scPrivate *) &SCpnt->SCp; + my_priv->p1 = (void *)(ulong) buf_dma_addr; + + dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n", + hd->ioc->name, SCpnt, SCpnt->request_bufflen)); + + /* 0xD1000000 = LAST | EOB | SIMPLE | EOL */ + psge->FlagsLength = cpu_to_le32( + 0xD1000000|sgdir|SCpnt->request_bufflen); + cpu_to_leXX(buf_dma_addr, psge->Address); + + return SUCCESS; + } + + /* Handle the SG case. + */ + sg = (struct scatterlist *) SCpnt->request_buffer; + sg_done = 0; + sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION); + chainSge = NULL; + + /* Prior to entering this loop - the following must be set + * current MF: sgeOffset (bytes) + * chainSge (Null if original MF is not a chain buffer) + * sg_done (num SGE done for this MF) + */ + +nextSGEset: + numSgeSlots = ((frm_sz - sgeOffset) / sizeof(MptSge_t)); + numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; + + sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; + + /* Get first (num - 1) SG elements + * Skip any SG entries with a length of 0 + * NOTE: at finish, sg and psge pointed to NEXT data/location positions + */ + for (ii=0; ii < (numSgeThisFrame-1); ii++) { + thisxfer = sg_dma_len(sg); + if (thisxfer == 0) { + sg ++; /* Get next SG element from the OS */ + sg_done++; + continue; } + + len += thisxfer; + psge->FlagsLength = cpu_to_le32( sgflags | thisxfer ); + v2 = sg_dma_address(sg); + cpu_to_leXX(v2, psge->Address); + + sg++; /* Get next SG element from the OS */ + psge++; /* Point to next SG location in this MF */ + sgeOffset += sizeof(MptSge_t); + sg_done++; } -#ifdef MPT_DEBUG - /* if (SCpnt->request_bufflen > max_xfer) */ - if (len > max_xfer) { - max_xfer = len; - dprintk((KERN_INFO MYNAM ": MPT_MaxXfer = %d\n", max_xfer)); + if (numSgeThisFrame == sges_left) { + /* Add last element, end of buffer and end of list flags. 
+ */ + sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT | + MPT_SGE_FLAGS_END_OF_BUFFER | + MPT_SGE_FLAGS_ADDRESSING | + MPT_SGE_FLAGS_END_OF_LIST; + + /* Add last SGE and set termination flags. + * Note: Last SGE may have a length of 0 - which should be ok. + */ + thisxfer = sg_dma_len(sg); + len += thisxfer; + + psge->FlagsLength = cpu_to_le32( sgflags | thisxfer ); + v2 = sg_dma_address(sg); + cpu_to_leXX(v2, psge->Address); + + sg_done++; + + if (chainSge) { + /* The current buffer is a chain buffer, + * but there is not another one. + * Update the chain element + * Offset and Length fields. + */ + chainSge->NextChainOffset = 0; + sgeOffset += sizeof(MptSge_t); + chainSge->Length = cpu_to_le16(sgeOffset); + } else { + /* The current buffer is the original MF + * and there is no Chain buffer. + */ + pReq->ChainOffset = 0; + } + } else { + /* At least one chain buffer is needed. + * Complete the first MF + * - last SGE element, set the LastElement bit + * - set ChainOffset (words) for orig MF + * (OR finish previous MF chain buffer) + * - update MFStructPtr ChainIndex + * - Populate chain element + * Also + * Loop until done. + */ + + dsgprintk((MYIOC_s_INFO_FMT "SG: Chain Required! sg done %d\n", + hd->ioc->name, sg_done)); + + /* Set LAST_ELEMENT flag for last non-chain element + * in the buffer. Since psge points at the NEXT + * SGE element, go back one SGE element, update the flags + * and reset the pointer. (Note: sgflags & thisxfer are already + * set properly). + */ + if (sg_done) { + psge--; + sgflags = le32_to_cpu (psge->FlagsLength); + sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; + psge->FlagsLength = cpu_to_le32( sgflags ); + psge++; + } + + if (chainSge) { + /* The current buffer is a chain buffer. + * chainSge points to the previous Chain Element. + * Update its chain element Offset and Length (must + * include chain element size) fields. + * Old chain element is now complete. 
+ */ + chainSge->NextChainOffset = (u8) (sgeOffset >> 2); + sgeOffset += sizeof(MptSge_t); + chainSge->Length = cpu_to_le16(sgeOffset); + } else { + /* The original MF buffer requires a chain buffer - + * set the offset. + * Last element in this MF is a chain element. + */ + pReq->ChainOffset = (u8) (sgeOffset >> 2); + } + + sges_left -= sg_done; + + + /* NOTE: psge points to the beginning of the chain element + * in current buffer. Get a chain buffer. + */ + if ((mptscsih_getFreeChainBuffer(hd, &newIndex)) == FAILED) + return FAILED; + + /* Update the tracking arrays. + * If chainSge == NULL, update ReqToChain, else ChainToChain + */ + if (chainSge) { + hd->ChainToChain[chain_idx] = newIndex; + } else { + hd->ReqToChain[req_idx] = newIndex; + } + chain_idx = newIndex; + offset = hd->ioc->req_sz * chain_idx; + + /* Populate the chainSGE for the current buffer. + * - Set chain buffer pointer to psge and fill + * out the Address and Flags fields. + */ + chainSge = (MptChain_t *) psge; + chainSge->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; + cpu_to_leXX ((hd->ChainBufferDMA + offset), chainSge->Address); + + dsgprintk((KERN_INFO " Current buff @ %p (index 0x%x)", + psge, req_idx)); + + /* Start the SGE for the next buffer + */ + psge = (MptSge_t *) (hd->ChainBuffer + offset); + sgeOffset = 0; + sg_done = 0; + + dsgprintk((KERN_INFO " Chain buff @ %p (index 0x%x)\n", + psge, chain_idx)); + + /* Start the SGE for the next buffer + */ + + goto nextSGEset; } -#endif - hd->ScsiLookup[my_idx] = SCpnt; + return SUCCESS; +} - /* Main banana... */ - mpt_put_msg_frame(ScsiDoneCtx, hd->ioc->id, mf); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_AddNullSGE - Add a NULL SGE to the SCSIIORequest_t + * Message Frame. 
+ * @pReq: Pointer to SCSIIORequest_t structure + */ +static void +mptscsih_AddNullSGE(SCSIIORequest_t *pReq) +{ + MptSge_t *psge; - atomic_inc(&queue_depth); - if (atomic_read(&queue_depth) > max_qd) { - max_qd = atomic_read(&queue_depth); - dprintk((KERN_INFO MYNAM ": Queue depth now %d.\n", max_qd)); + psge = (MptSge_t *) &pReq->SGL; + psge->FlagsLength = cpu_to_le32(MPT_SGE_FLAGS_SSIMPLE_READ | 0); + + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_getFreeChainBuffes - Function to get a free chain + * from the MPT_SCSI_HOST FreeChainQ. + * @hd: Pointer to the MPT_SCSI_HOST instance + * @req_idx: Index of the SCSI IO request frame. (output) + * + * return SUCCESS or FAILED + */ +static int +mptscsih_getFreeChainBuffer(MPT_SCSI_HOST *hd, int *retIndex) +{ + MPT_FRAME_HDR *chainBuf = NULL; + unsigned long flags; + int rc = FAILED; + int chain_idx = MPT_HOST_NO_CHAIN; + + //spin_lock_irqsave(&hd->FreeChainQlock, flags); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!Q_IS_EMPTY(&hd->FreeChainQ)) { + + int offset; + + chainBuf = hd->FreeChainQ.head; + Q_DEL_ITEM(&chainBuf->u.frame.linkage); + offset = (u8 *)chainBuf - (u8 *)hd->ChainBuffer; + chain_idx = offset / hd->ioc->req_sz; + rc = SUCCESS; } + //spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - dmfprintk((KERN_INFO MYNAM ": Issued SCSI cmd (%p)\n", SCpnt)); - return 0; + *retIndex = chain_idx; + + dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n", + hd->ioc->name, *retIndex, chainBuf)); + + return rc; } -#ifdef MPT_SCSI_USE_NEW_EH /* { */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* - mptscsih_abort - Returns: 0=SUCCESS, else FAILED -*/ + * mptscsih_freeChainBuffers - Function to free chain buffers associated + * with a SCSI IO request + * @hd: Pointer to the MPT_SCSI_HOST 
instance + * @req_idx: Index of the SCSI IO request frame. + * + * Called if SG chain buffer allocation fails and mptscsih callbacks. + * No return. + */ +static void +mptscsih_freeChainBuffers(MPT_SCSI_HOST *hd, int req_idx) +{ + MPT_FRAME_HDR *chain = NULL; + unsigned long flags; + int chain_idx; + int next; + + /* Get the first chain index and reset + * tracker state. + */ + chain_idx = hd->ReqToChain[req_idx]; + hd->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN; + + while (chain_idx != MPT_HOST_NO_CHAIN) { + + /* Save the next chain buffer index */ + next = hd->ChainToChain[chain_idx]; + + /* Free this chain buffer and reset + * tracker + */ + hd->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN; + + chain = (MPT_FRAME_HDR *) (hd->ChainBuffer + + (chain_idx * hd->ioc->req_sz)); + //spin_lock_irqsave(&hd->FreeChainQlock, flags); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->FreeChainQ.head, + &chain->u.frame.linkage, MPT_FRAME_HDR); + //spin_unlock_irqrestore(&hd->FreeChainQlock, flags); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + dmfprintk((MYIOC_s_INFO_FMT "FreeChainBuffers (index %d)\n", + hd->ioc->name, chain_idx)); + + /* handle next */ + chain_idx = next; + } + return; +} + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mptscsih_abort - Abort linux Scsi_Cmnd routine, new_eh variant - * @SCpnt: Pointer to Scsi_Cmnd structure, IO to be aborted +/* + * Reset Handling + */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_TMHandler - Generic handler for SCSI Task Management. + * Fall through to mpt_HardResetHandler if: not operational, too many + * failed TM requests or handshake failure. 
* - * (linux Scsi_Host_Template.eh_abort_handler routine) + * @ioc: Pointer to MPT_ADAPTER structure + * @type: Task Management type + * @target: Logical Target ID for reset (if appropriate) + * @lun: Logical Unit for reset (if appropriate) + * @ctx2abort: Context for the task to be aborted (if appropriate) + * @sleepFlag: If set, use udelay instead of schedule in handshake code. * - * Returns SUCCESS or FAILED. + * Remark: Currently invoked from a non-interrupt thread (_bh). + * + * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC + * will be active. + * + * Returns 0 for SUCCESS or -1 if FAILED. */ -int -mptscsih_abort(Scsi_Cmnd * SCpnt) +static int +mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; - MPT_SCSI_HOST *hd; - u32 *msg; - u32 ctx2abort; - int i; + MPT_ADAPTER *ioc = NULL; + int rc = -1; + int doTask = 1; + u32 ioc_raw_state; unsigned long flags; - printk(KERN_WARNING MYNAM ": Attempting _ABORT SCSI IO (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + /* If FW is being reloaded currently, return success to + * the calling function. + */ + if (!hd) + return 0; - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + ioc = hd->ioc; + dtmprintk((MYIOC_s_INFO_FMT "TMHandler Entered!\n", ioc->name)); + + if (ioc == NULL) { + printk(KERN_ERR MYNAM " TMHandler" " NULL ioc!\n"); + return 0; + } + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) { + spin_unlock_irqrestore(&ioc->diagLock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->diagLock, flags); + + /* Do not do a Task Management if there are + * too many failed TMs on this adapter. + */ + if (hd->numTMrequests > MPT_HOST_TOO_MANY_TM) + doTask = 0; + + /* Is operational? 
+ */ + ioc_raw_state = mpt_GetIocState(hd->ioc, 0); + +#ifdef MPT_DEBUG_RESET + if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { + printk(MYIOC_s_WARN_FMT + "TM Handler: IOC Not operational! state 0x%x Calling HardResetHandler\n", + hd->ioc->name, ioc_raw_state); + } +#endif + + if (doTask && ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) + && !(ioc_raw_state & MPI_DOORBELL_ACTIVE)) { + + /* Isse the Task Mgmt request. + */ + rc = mptscsih_IssueTaskMgmt(hd, type, target, lun, ctx2abort, sleepFlag); + if (rc) { + printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", hd->ioc->name); + } else { + printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt Successful!\n", hd->ioc->name); + } + } +#ifdef DROP_TEST + numTMrequested++; + if (numTMrequested > 5) { + rc = 0; /* set to 1 to force a hard reset */ + numTMrequested = 0; + } +#endif + + if (rc) { + dtmprintk((MYIOC_s_INFO_FMT "Falling through to HardReset! \n", + hd->ioc->name)); + rc = mpt_HardResetHandler(hd->ioc, sleepFlag); + } + + dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc)); +#ifndef MPT_SCSI_USE_NEW_EH + dtmprintk((MYIOC_s_INFO_FMT "TMHandler: _bh_handler state (%d) taskQ count (%d)\n", + ioc->name, mytaskQ_bh_active, hd->taskQcnt)); +#endif + + return rc; +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_IssueTaskMgmt - Generic send Task Management function. + * @hd: Pointer to MPT_SCSI_HOST structure + * @type: Task Management type + * @target: Logical Target ID for reset (if appropriate) + * @lun: Logical Unit for reset (if appropriate) + * @ctx2abort: Context for the task to be aborted (if appropriate) + * @sleepFlag: If set, use udelay instead of schedule in handshake code. + * + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). + * + * Not all fields are meaningfull for all task types. 
+ * + * Returns 0 for SUCCESS, -999 for "no msg frames", + * else other non-zero value returned. + */ +static int +mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 target, u8 lun, int ctx2abort, int sleepFlag) +{ + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + int ii; + int retval = 0; + + /* Return Fail to calling function if no message frames available. + */ if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return FAILED; + dtmprintk((MYIOC_s_WARN_FMT "IssueTaskMgmt, no msg frames!!\n", + hd->ioc->name)); + //return FAILED; + return -999; } + dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n", + hd->ioc->name, mf)); + /* Format the Request + */ pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32 *) mf; - - pScsiTm->TargetID = SCpnt->target; + pScsiTm->TargetID = target; pScsiTm->Bus = hd->port; pScsiTm->ChainOffset = 0; pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + pScsiTm->TaskType = type; pScsiTm->Reserved1 = 0; pScsiTm->MsgFlags = 0; - for (i = 0; i < 8; i++) { - u8 val = 0; - if (i == 1) - val = SCpnt->lun; - pScsiTm->LUN[i] = val; + for (ii= 0; ii < 8; ii++) { + pScsiTm->LUN[ii] = 0; + } + pScsiTm->LUN[1] = lun; + + for (ii=0; ii < 7; ii++) + pScsiTm->Reserved2[ii] = 0; + + pScsiTm->TaskMsgContext = ctx2abort; + dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt, ctx2abort (0x%08x), type (%d)\n", + hd->ioc->name, ctx2abort, type)); + + /* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake + mpt_put_msg_frame(hd->ioc->id, mf); + * Save the MF pointer in case the request times out. 
+ */ + hd->tmPtr = mf; + hd->numTMrequests++; + hd->TMtimer.expires = jiffies + HZ*20; /* 20 seconds */ + add_timer(&hd->TMtimer); + + if ((retval = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, + sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, sleepFlag)) + != 0) { + dtmprintk((MYIOC_s_WARN_FMT "_send_handshake FAILED!" + " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, hd->ioc, mf)); + hd->numTMrequests--; + hd->tmPtr = NULL; + del_timer(&hd->TMtimer); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + return ii; + } + + return retval; +} + +#ifdef MPT_SCSI_USE_NEW_EH /* { */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_abort - Abort linux Scsi_Cmnd routine, new_eh variant + * @SCpnt: Pointer to Scsi_Cmnd structure, IO to be aborted + * + * (linux Scsi_Host_Template.eh_abort_handler routine) + * + * Returns SUCCESS or FAILED. + */ +int +mptscsih_abort(Scsi_Cmnd * SCpnt) +{ + MPT_SCSI_HOST *hd; + MPT_FRAME_HDR *mf; + unsigned long flags; + u32 ctx2abort; + int scpnt_idx; + u8 type; + + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO (=%p)\n", SCpnt); + printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } + + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + /* If this command is pended, then timeout/hang occurred + * during DV. 
Post command and flush pending Q + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } /* Most important! Set TaskMsgContext to SCpnt's MsgContext! * (the IO to be ABORT'd) @@ -1329,37 +2608,63 @@ * swap it here either. It is an opaque cookie to * the controller, so it does not matter. -DaveM */ - ctx2abort = SCPNT_TO_MSGCTX(SCpnt); - if (ctx2abort == -1) { - printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#2) for SCpnt=%p\n", SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - } else { - dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort)); - pScsiTm->TaskMsgContext = ctx2abort; + mf = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx); + ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext; + /* This thread will not exit until tmPending is cleared + * FIXME - must ensure single threaded....DV conflict possible + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (hd->is_spi) + type = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else { + type = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + hd->abortSCpnt = SCpnt; + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO! 
(sc=%p)\n", SCpnt); + } - /* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); - */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[2] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); + if (mptscsih_TMHandler(hd, type, + SCpnt->target, SCpnt->lun, ctx2abort, CAN_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); + + /* If command not found, do not do callback, + * just return failed. CHECKME + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_unlock_irqrestore(sc->host->host_lock, flags); } + + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + } + + + /* Spin on tmPending until we get the interrupt for this TM request. 
+ */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } - //return SUCCESS; return FAILED; } @@ -1375,63 +2680,95 @@ int mptscsih_dev_reset(Scsi_Cmnd * SCpnt) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; MPT_SCSI_HOST *hd; - u32 *msg; - int i; + MPT_FRAME_HDR *mf; unsigned long flags; + int scpnt_idx; + u8 type; printk(KERN_WARNING MYNAM ": Attempting _TARGET_RESET (%p)\n", SCpnt); printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } - if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - return FAILED; + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32*)mf; + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. 
+ */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (hd->is_spi) + type = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else { + type = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + printk(KERN_WARNING MYNAM ": Attempting Target Reset! (sc=%p)\n", SCpnt); + } - pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + SCpnt->target, 0, 0, CAN_SLEEP) < 0) { + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); - /* _TARGET_RESET goes to LUN 0 always! */ - for (i = 0; i < 8; i++) - pScsiTm->LUN[i] = 0; - - /* Control: No data direction, set task mgmt bit? */ - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + /* If command not found, do not do callback, + * just returned failed. CHECKME. + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + } - pScsiTm->TaskMsgContext = cpu_to_le32(0); + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + } -/* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); -*/ -/* FIXME! Check return status! 
*/ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[3] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } //return SUCCESS; @@ -1450,68 +2787,96 @@ int mptscsih_bus_reset(Scsi_Cmnd * SCpnt) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; MPT_SCSI_HOST *hd; - u32 *msg; - int i; + MPT_FRAME_HDR *mf; unsigned long flags; + int scpnt_idx; printk(KERN_WARNING MYNAM ": Attempting _BUS_RESET (%p)\n", SCpnt); printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } - if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; + /* Find this command + */ + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. If found in + * doneQ, delete from Q. Do OS callback. 
+ */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); SCpnt->scsi_done(SCpnt); - return FAILED; + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; } - pScsiTm = (SCSITaskMgmt_t *) mf; - msg = (u32 *) mf; + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - pScsiTm->Reserved = 0; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + 0, 0, 0, CAN_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", + hd->ioc->name, SCpnt); - for (i = 0; i < 8; i++) - pScsiTm->LUN[i] = 0; + /* If command not found, do not do callback, + * just returned failed. CHECKME. + */ + if (hd->ScsiLookup[scpnt_idx] != NULL) { + //atomic_dec(&queue_depth); + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + } - /* Control: No data direction, set task mgmt bit? 
*/ - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); - pScsiTm->TaskMsgContext = cpu_to_le32(0); + return FAILED; + } -/* MPI v0.10 requires SCSITaskMgmt requests be sent via Doorbell/handshake - mpt_put_msg_frame(hd->ioc->id, mf); -*/ -/* FIXME! Check return status! */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), msg)) - != 0) { - printk(KERN_WARNING MYNAM - ": WARNING[4] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", - i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); } return SUCCESS; } -#if 0 /* { */ +#if 0 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mptscsih_host_reset - Perform a SCSI host adapter RESET! @@ -1523,11 +2888,61 @@ * Returns SUCCESS or FAILED. 
*/ int -mptscsih_host_reset(Scsi_Cmnd * SCpnt) +mptscsih_host_reset(Scsi_Cmnd *SCpnt) { - return FAILED; + MPT_SCSI_HOST *hd; + MPT_FRAME_HDR *mf; + + printk(KERN_WARNING MYNAM ": Attempting HOST_RESET (%p)\n", SCpnt); + printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + + if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { + SCpnt->result = DID_RESET << 16; + spin_lock_irqsave(sc->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); + return SUCCESS; + } + + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } + + /* This thread will not exit until tmPending is cleared + */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0) { + SCpnt->result = STS_BUSY; + spin_lock_irqsave(sc->host->host_lock, flags); // sjr-added + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(sc->host->host_lock, flags); // sjr-added + return FAILED; + } + + /* Spin on tmPending until we get the interrupt for this TM request. + */ + while (1) { + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (!hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + } + + return SUCCESS; } -#endif /* } */ +#endif #else /* MPT_SCSI old EH stuff... 
*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1546,57 +2961,118 @@ MPT_FRAME_HDR *mf; struct tq_struct *ptaskfoo; unsigned long flags; + int scpnt_idx; - printk(KERN_WARNING MYNAM ": Scheduling _ABORT SCSI IO (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + printk(KERN_WARNING MYNAM ": OldAbort scheduling ABORT SCSI IO (sc=%p)\n", SCpnt); + printk(KERN_WARNING " IOs outstanding = %d\n", atomic_read(&queue_depth)); if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { - SCpnt->result = DID_ABORT << 16; + printk(KERN_WARNING " WARNING - OldAbort, NULL hostdata ptr!!\n"); + SCpnt->result = DID_ERROR << 16; + SCpnt->scsi_done(SCpnt); + return SCSI_ABORT_NOT_RUNNING; + } + + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. + * If found in doneQ, delete from Q. + * Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; SCpnt->scsi_done(SCpnt); return SCSI_ABORT_SUCCESS; + } else { + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. + */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } } /* * Check to see if there's already an ABORT queued for this guy. */ - mf = search_taskQ(0,SCpnt,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + mf = search_taskQ(0, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); if (mf != NULL) { + dtmprintk((MYIOC_s_INFO_FMT "OldAbort:Abort Task PENDING cmd (%p) taskQ depth (%d)\n", + hd->ioc->name, SCpnt, hd->taskQcnt)); + return SCSI_ABORT_PENDING; + } + + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + /* If IOC is reloading FW, return PENDING. 
+ */ + spin_lock_irqsave(&hd->ioc->diagLock, flags); + if (hd->ioc->diagPending) { + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); return SCSI_ABORT_PENDING; } + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + /* If there are no message frames what should we do? + */ if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return SCSI_ABORT_BUSY; + printk((KERN_WARNING " WARNING - OldAbort, no msg frames!!\n")); + /* We are out of message frames! + * Call the reset handler to do a FW reload. + */ + printk((KERN_WARNING " Reloading Firmware!!\n")); + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING " Firmware Reload FAILED!!\n")); + } + return SCSI_ABORT_PENDING; } /* - * Add ourselves to (end of) mpt_scsih_taskQ. + * Add ourselves to (end of) taskQ . * Check to see if our _bh is running. If NOT, schedule it. */ - dslprintk((KERN_INFO MYNAM ": spinlock#2\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - Q_ADD_TAIL(&mpt_scsih_taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); - mpt_scsih_taskQ_cnt++; - /* Yikes - linkage! */ -/* SCpnt->host_scribble = (unsigned char *)mf; */ - mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); + hd->taskQcnt++; + atomic_inc(&mpt_taskQdepth); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + spin_lock_irqsave(&mytaskQ_lock, flags); + + /* Save the original SCpnt mf pointer + */ + SCpnt->host_scribble = (u8 *) MPT_INDEX_2_MFPTR (hd->ioc, scpnt_idx); + + /* For the time being, force bus reset on any abort + * requests for the 1030 FW. + */ + if (hd->is_spi) + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; + else + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mf->u.frame.linkage.argp1 = SCpnt; - if (! 
mpt_scsih_taskQ_bh_active) { - mpt_scsih_taskQ_bh_active = 1; + mf->u.frame.linkage.argp2 = (void *) hd; + + dtmprintk((MYIOC_s_INFO_FMT "OldAbort:_bh_handler state (%d) taskQ count (%d)\n", + hd->ioc->name, mytaskQ_bh_active, hd->taskQcnt)); + + if (! mytaskQ_bh_active) { + mytaskQ_bh_active = 1; /* * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. */ - ptaskfoo = (struct tq_struct *) ((u8*)mf + hd->ioc->req_sz - sizeof(*ptaskfoo)); + ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; SCHEDULE_TASK(ptaskfoo); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_unlock_irqrestore(&mytaskQ_lock, flags); return SCSI_ABORT_PENDING; } @@ -1618,9 +3094,10 @@ MPT_FRAME_HDR *mf; struct tq_struct *ptaskfoo; unsigned long flags; + int scpnt_idx; - printk(KERN_WARNING MYNAM ": Scheduling _BUS_RESET (=%p)\n", SCpnt); - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + printk(KERN_WARNING MYNAM ": OldReset scheduling BUS_RESET (sc=%p)\n", SCpnt); + printk(KERN_WARNING " IOs outstanding = %d\n", atomic_read(&queue_depth)); if ((hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata) == NULL) { SCpnt->result = DID_RESET << 16; @@ -1628,48 +3105,102 @@ return SCSI_RESET_SUCCESS; } + if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { + /* Cmd not found in ScsiLookup. + * If found in doneQ, delete from Q. + * Do OS callback. + */ + search_doneQ_for_cmd(hd, SCpnt); + + SCpnt->result = DID_RESET << 16; + SCpnt->scsi_done(SCpnt); + return SCSI_RESET_SUCCESS; + } else { + /* If this command is pended, then timeout/hang occurred + * during DV. Force bus reset by posting command to F/W + * and then following up with the reset request. 
+ */ + if ((mf = mptscsih_search_pendingQ(hd, scpnt_idx)) != NULL) { + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + post_pendingQ_commands(hd); + } + } + + /* + * Check to see if there's an ABORT_TASK queued for this guy. + * If so, delete. + */ + search_taskQ(1, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + /* * Check to see if there's already a BUS_RESET queued for this guy. */ - mf = search_taskQ(0,SCpnt,MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS); + mf = search_taskQ(0, SCpnt, hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS); if (mf != NULL) { + dtmprintk((MYIOC_s_INFO_FMT "OldReset:Reset Task PENDING cmd (%p) taskQ depth (%d)\n", + hd->ioc->name, SCpnt, hd->taskQcnt)); return SCSI_RESET_PENDING; } + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + /* If IOC is reloading FW, return PENDING. + */ + spin_lock_irqsave(&hd->ioc->diagLock, flags); + if (hd->ioc->diagPending) { + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + return SCSI_RESET_PENDING; + } + spin_unlock_irqrestore(&hd->ioc->diagLock, flags); + if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc->id)) == NULL) { -/* SCpnt->result = DID_SOFT_ERROR << 16; */ - SCpnt->result = STS_BUSY; - SCpnt->scsi_done(SCpnt); - return SCSI_RESET_PUNT; + /* We are out of message frames! + * Call the reset handler to do a FW reload. + */ + printk((KERN_WARNING " Reloading Firmware!!\n")); + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING " Firmware Reload FAILED!!\n")); + } + return SCSI_RESET_PENDING; } /* - * Add ourselves to (end of) mpt_scsih_taskQ. + * Add ourselves to (end of) taskQ. * Check to see if our _bh is running. If NOT, schedule it. */ - dslprintk((KERN_INFO MYNAM ": spinlock#3\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - Q_ADD_TAIL(&mpt_scsih_taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); - mpt_scsih_taskQ_cnt++; - /* Yikes - linkage! 
*/ -/* SCpnt->host_scribble = (unsigned char *)mf; */ + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + Q_ADD_TAIL(&hd->taskQ, &mf->u.frame.linkage, MPT_FRAME_HDR); + hd->taskQcnt++; + atomic_inc(&mpt_taskQdepth); + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + spin_lock_irqsave(&mytaskQ_lock, flags); + + /* Save the original SCpnt mf pointer + */ + SCpnt->host_scribble = (u8 *) MPT_INDEX_2_MFPTR (hd->ioc, scpnt_idx); + mf->u.frame.linkage.arg1 = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; mf->u.frame.linkage.argp1 = SCpnt; - if (! mpt_scsih_taskQ_bh_active) { - mpt_scsih_taskQ_bh_active = 1; + mf->u.frame.linkage.argp2 = (void *) hd; + + dtmprintk((MYIOC_s_INFO_FMT "OldReset: _bh_handler state (%d) taskQ count (%d)\n", + hd->ioc->name, mytaskQ_bh_active, hd->taskQcnt)); + + if (! mytaskQ_bh_active) { + mytaskQ_bh_active = 1; /* * Oh how cute, no alloc/free/mgmt needed if we use * (bottom/unused portion of) MPT request frame. */ - ptaskfoo = (struct tq_struct *) ((u8*)mf + hd->ioc->req_sz - sizeof(*ptaskfoo)); + ptaskfoo = (struct tq_struct *) &mptscsih_ptaskfoo; ptaskfoo->sync = 0; ptaskfoo->routine = mptscsih_taskmgmt_bh; ptaskfoo->data = SCpnt; SCHEDULE_TASK(ptaskfoo); } - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - + spin_unlock_irqrestore(&mytaskQ_lock, flags); return SCSI_RESET_PENDING; } @@ -1686,147 +3217,171 @@ void mptscsih_taskmgmt_bh(void *sc) { + MPT_ADAPTER *ioc; Scsi_Cmnd *SCpnt; - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; + MPT_FRAME_HDR *mf = NULL; MPT_SCSI_HOST *hd; u32 ctx2abort = 0; - int i; unsigned long flags; + int scpnt_idx; + int did; u8 task_type; - dslprintk((KERN_INFO MYNAM ": spinlock#4\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_taskQ_bh_active = 1; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + spin_lock_irqsave(&mytaskQ_lock, flags); + mytaskQ_bh_active = 1; + spin_unlock_irqrestore(&mytaskQ_lock, flags); - while (1) { - current->state = TASK_INTERRUPTIBLE; + do { + 
set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/4); + did = 0; - /* - * We MUST remove item from taskQ *before* we format the - * frame as a SCSITaskMgmt request and send it down to the IOC. - */ - dslprintk((KERN_INFO MYNAM ": spinlock#5\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - if (Q_IS_EMPTY(&mpt_scsih_taskQ)) { - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - break; - } - mf = mpt_scsih_taskQ.head; - Q_DEL_ITEM(&mf->u.frame.linkage); - mpt_scsih_taskQ_cnt--; - mpt_scsih_active_taskmgmt_mf = mf; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); - - SCpnt = (Scsi_Cmnd*)mf->u.frame.linkage.argp1; - if (SCpnt == NULL) { - printk(KERN_ERR MYNAM ": ERROR - TaskMgmt has NULL SCpnt! (%p:%p)\n", mf, SCpnt); - continue; - } - hd = (MPT_SCSI_HOST *) SCpnt->host->hostdata; - pScsiTm = (SCSITaskMgmt_t *) mf; + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + if (ioc->sh) { + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd == NULL) { + printk(KERN_ERR MYNAM + ": ERROR - TaskMgmt NULL SCSI Host!" + "(ioc=%p, sh=%p hd=%p)\n", + ioc, ioc->sh, hd); + continue; + } - for (i = 0; i < 8; i++) { - pScsiTm->LUN[i] = 0; - } + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (Q_IS_EMPTY(&hd->taskQ)) { + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - task_type = mf->u.frame.linkage.arg1; - if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - printk(KERN_WARNING MYNAM ": Attempting _ABORT SCSI IO! (mf=%p:sc=%p)\n", - mf, SCpnt); - - /* Most important! Set TaskMsgContext to SCpnt's MsgContext! - * (the IO to be ABORT'd) - * - * NOTE: Since we do not byteswap MsgContext, we do not - * swap it here either. It is an opaque cookie to - * the controller, so it does not matter. 
-DaveM - */ - ctx2abort = SCPNT_TO_MSGCTX(SCpnt); - if (ctx2abort == -1) { - printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#1) for SCpnt=%p\n", SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - continue; - } - pScsiTm->LUN[1] = SCpnt->lun; - } - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) - { - printk(KERN_WARNING MYNAM ": Attempting _BUS_RESET! (against SCSI IO mf=%p:sc=%p)\n", mf, SCpnt); - } -#if 0 - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {} - else if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET) {} -#endif + /* If we ever find a non-empty queue, + * keep the handler alive + */ + did++; - printk(KERN_WARNING MYNAM ": IOs outstanding = %d\n", atomic_read(&queue_depth)); + /* tmPending is SMP lock-protected */ + if (hd->tmPending || hd->tmPtr) { + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } + hd->tmPending = 1; - pScsiTm->TargetID = SCpnt->target; - pScsiTm->Bus = hd->port; - pScsiTm->ChainOffset = 0; - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; + /* Process this request + */ + mf = hd->taskQ.head; + Q_DEL_ITEM(&mf->u.frame.linkage); + hd->taskQcnt--; + atomic_dec(&mpt_taskQdepth); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + SCpnt = (Scsi_Cmnd*)mf->u.frame.linkage.argp1; + if (SCpnt == NULL) { + printk(KERN_ERR MYNAM ": ERROR - TaskMgmt has NULL SCpnt! (mf=%p:sc=%p)\n", + mf, SCpnt); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - pScsiTm->Reserved = 0; - pScsiTm->TaskType = task_type; - pScsiTm->Reserved1 = 0; - pScsiTm->MsgFlags = 0; + /* Get the ScsiLookup index pointer + * from the SC pointer. 
+ */ + if (!SCpnt->host_scribble || ((MPT_SCSI_HOST *)SCpnt->host->hostdata != hd)) { + /* The command associated with the + * abort/reset request must have + * completed and this is a stale + * request. We are done. + * Free the current MF and continue. + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - for (i = 0; i < 7; i++) - pScsiTm->Reserved2[i] = 0; + scpnt_idx = MFPTR_2_MPT_INDEX(hd->ioc, SCpnt->host_scribble); + if (scpnt_idx != SCPNT_TO_LOOKUP_IDX(SCpnt)) { + /* Error! this should never happen!! + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + continue; + } - dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort)); - pScsiTm->TaskMsgContext = ctx2abort; + task_type = mf->u.frame.linkage.arg1; + ctx2abort = 0; + if (task_type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + MPT_FRAME_HDR *SCpntMf; + + /* + * Most important! Set TaskMsgContext to SCpnt's MsgContext! + * (the IO to be ABORT'd) + * + * NOTE: Since we do not byteswap MsgContext, we do not + * swap it here either. It is an opaque cookie to + * the controller, so it does not matter. -DaveM + */ + SCpntMf = (MPT_FRAME_HDR *) SCpnt->host_scribble; + ctx2abort = SCpntMf->u.frame.hwhdr.msgctxu.MsgContext; + + hd->abortSCpnt = SCpnt; + printk(KERN_WARNING MYNAM ": Attempting ABORT SCSI IO! (mf=%p:sc=%p)\n", + mf, SCpnt); + } - /* Control: No data direction, set task mgmt bit? */ + /* The TM handler will allocate a new mf, + * so free the current mf. + */ + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + mf = NULL; - /* - * As of MPI v0.10 this request can NOT be sent (normally) - * via FIFOs. 
So we can't: - * mpt_put_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - * SCSITaskMgmt requests MUST be sent ONLY via - * Doorbell/handshake now. :-( - */ - if ((i = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc->id, - sizeof(SCSITaskMgmt_t), (u32*) mf)) - != 0) { - printk(KERN_WARNING MYNAM ": WARNING[1] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", i, mf, SCpnt); - SCpnt->result = DID_SOFT_ERROR << 16; - spin_lock_irqsave(SCpnt->host->host_lock, flags); - SCpnt->scsi_done(SCpnt); - spin_unlock_irqrestore(SCpnt->host->host_lock, flags); - mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); - } else { - /* Spin-Wait for TaskMgmt complete!!! */ - while (mpt_scsih_active_taskmgmt_mf != NULL) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(HZ/4); + if (mptscsih_TMHandler(hd, task_type, SCpnt->target, SCpnt->lun, ctx2abort, NO_SLEEP) < 0) { + + /* The TM request failed and the subsequent FW-reload failed! + * Fatal error case. + */ + printk(KERN_WARNING MYNAM + ": WARNING[1] - IOC error processing TaskMgmt request (sc=%p)\n", SCpnt); + + if (hd->ScsiLookup[scpnt_idx] != NULL) { + atomic_dec(&queue_depth); + SCpnt->result = DID_SOFT_ERROR << 16; + spin_lock_irqsave(SCpnt->host->host_lock, flags); + SCpnt->scsi_done(SCpnt); + spin_unlock_irqrestore(SCpnt->host->host_lock, + flags); + mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf); + } + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + hd->abortSCpnt = NULL; + } } } - } + if (atomic_read(&mpt_taskQdepth) > 0) + did++; - dslprintk((KERN_INFO MYNAM ": spinlock#6\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_taskQ_bh_active = 0; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + } while ( did ); + + spin_lock_irqsave(&mytaskQ_lock, flags); + mytaskQ_bh_active = 0; + spin_unlock_irqrestore(&mytaskQ_lock, flags); return; } - #endif /* } */ 
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_taskmgmt_complete - Callback routine, gets registered to - * Fusion MPT base driver + * mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver * @ioc: Pointer to MPT_ADAPTER structure * @mf: Pointer to SCSI task mgmt request frame - * @r: Pointer to SCSI task mgmt reply frame + * @mr: Pointer to SCSI task mgmt reply frame * * This routine is called from mptbase.c::mpt_interrupt() at the completion * of any SCSI task management request. @@ -1836,73 +3391,165 @@ * Returns 1 indicating alloc'd request frame ptr should be freed. */ static int -mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r) +mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { SCSITaskMgmtReply_t *pScsiTmReply; SCSITaskMgmt_t *pScsiTmReq; - u8 tmType; -#ifndef MPT_SCSI_USE_NEW_EH + MPT_SCSI_HOST *hd = NULL; unsigned long flags; -#endif - - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt completed mf=%p, r=%p\n", - mf, r)); + u8 tmType = 0; -#ifndef MPT_SCSI_USE_NEW_EH - dslprintk((KERN_INFO MYNAM ": spinlock#7\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - /* It better be the active one! */ - if (mf != mpt_scsih_active_taskmgmt_mf) { - printk(KERN_ERR MYNAM ": ERROR! Non-active TaskMgmt (=%p) completed!\n", mf); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + dtmprintk((MYIOC_s_INFO_FMT "SCSI TaskMgmt completed (mf=%p,r=%p)\n", + ioc->name, mf, mr)); + if (ioc->sh) { + /* Depending on the thread, a timer is activated for + * the TM request. Delete this timer on completion of TM. + * Decrement count of outstanding TM requests. 
+ */ + hd = (MPT_SCSI_HOST *)ioc->sh->hostdata; + if (hd->tmPtr) { + del_timer(&hd->TMtimer); + } + dtmprintk((MYIOC_s_INFO_FMT "taskQcnt (%d)\n", + ioc->name, hd->taskQcnt)); + } else { + dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt Complete: NULL Scsi Host Ptr\n", + ioc->name)); return 1; } -#ifdef MPT_DEBUG - if ((mf == NULL) || - (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { - printk(KERN_ERR MYNAM ": ERROR! NULL or BAD TaskMgmt ptr (=%p)!\n", mf); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + if (mr == NULL) { + dtmprintk((MYIOC_s_WARN_FMT "ERROR! TaskMgmt Reply: NULL Request %p\n", + ioc->name, mf)); return 1; - } -#endif - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); -#endif - - if (r != NULL) { - pScsiTmReply = (SCSITaskMgmtReply_t*)r; + } else { + pScsiTmReply = (SCSITaskMgmtReply_t*)mr; pScsiTmReq = (SCSITaskMgmt_t*)mf; /* Figure out if this was ABORT_TASK, TARGET_RESET, or BUS_RESET! */ tmType = pScsiTmReq->TaskType; - dprintk((KERN_INFO MYNAM ": TaskType = %d\n", tmType)); - dprintk((KERN_INFO MYNAM ": TerminationCount = %d\n", - le32_to_cpu(pScsiTmReply->TerminationCount))); + dtmprintk((KERN_INFO " TaskType = %d, TerminationCount=%d\n", + tmType, le32_to_cpu(pScsiTmReply->TerminationCount))); /* Error? (anything non-zero?) */ if (*(u32 *)&pScsiTmReply->Reserved2[0]) { - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt (%d) - Oops!\n", tmType)); - dprintk((KERN_INFO MYNAM ": IOCStatus = %04xh\n", - le16_to_cpu(pScsiTmReply->IOCStatus))); - dprintk((KERN_INFO MYNAM ": IOCLogInfo = %08xh\n", + u16 iocstatus; + + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + dtmprintk((KERN_INFO " SCSI TaskMgmt (%d) - Oops!\n", tmType)); + dtmprintk((KERN_INFO " IOCStatus = %04xh\n", iocstatus)); + dtmprintk((KERN_INFO " IOCLogInfo = %08xh\n", le32_to_cpu(pScsiTmReply->IOCLogInfo))); + + /* clear flags and continue. 
+ */ + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + hd->abortSCpnt = NULL; +#ifdef DROP_TEST + if (dropMfPtr) + dropTestBad++; +#endif + /* If an internal command is present + * or the TM failed - reload the FW. + * FC FW may respond FAILED to an ABORT + */ + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { + if ((hd->cmdPtr) || + (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED)) { + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING + " Firmware Reload FAILED!!\n")); + } + } + } } else { - dprintk((KERN_INFO MYNAM ": SCSI TaskMgmt (%d) SUCCESS!\n", tmType)); + dtmprintk((KERN_INFO " SCSI TaskMgmt SUCCESS!\n")); + +#ifndef MPT_SCSI_USE_NEW_EH + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { + /* clean taskQ - remove tasks associated with + * completed commands. + */ + clean_taskQ(hd); + } else if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + /* If taskQ contains another request + * for this SCpnt, delete this request. + */ + search_taskQ_for_cmd(hd->abortSCpnt, hd); + } +#endif + hd->numTMrequests--; + hd->abortSCpnt = NULL; + flush_doneQ(hd); + +#ifdef DROP_TEST + if (dropMfPtr) + dropTestOK++; +#endif + } + } + +#ifdef DROP_TEST + { + Scsi_Cmnd *sc; + unsigned long flags; + u16 req_idx; + + /* Free resources for the drop test MF and chain buffers. 
+ */ + if (dropMfPtr) { + req_idx = le16_to_cpu(dropMfPtr->u.frame.hwhdr.msgctxu.fld.req_idx); + sc = hd->ScsiLookup[req_idx]; + if (sc == NULL) { + printk(MYIOC_s_ERR_FMT + "Drop Test: NULL ScsiCmd ptr!\n", + ioc->name); + } else { + sc->host_scribble = NULL; + if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) + sc->result = DID_RESET << 16; + else + sc->result = DID_ABORT << 16; + + hd->ScsiLookup[req_idx] = NULL; + atomic_dec(&queue_depth); + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + + mptscsih_freeChainBuffers(hd, req_idx); + mpt_free_msg_frame(ScsiDoneCtx, ioc->id, dropMfPtr); + + printk(MYIOC_s_INFO_FMT + "Free'd Dropped cmd (%p)\n", + hd->ioc->name, sc); + printk(MYIOC_s_INFO_FMT + "mf (%p) reqidx (%4x)\n", + hd->ioc->name, dropMfPtr, + req_idx); + printk(MYIOC_s_INFO_FMT + "Num Tot (%d) Good (%d) Bad (%d) \n", + hd->ioc->name, dropTestNum, + dropTestOK, dropTestBad); + } + dropMfPtr = NULL; } } +#endif #ifndef MPT_SCSI_USE_NEW_EH /* * Signal to _bh thread that we finished. + * This IOC can now process another TM command. */ - dslprintk((KERN_INFO MYNAM ": spinlock#8\n")); - spin_lock_irqsave(&mpt_scsih_taskQ_lock, flags); - mpt_scsih_active_taskmgmt_mf = NULL; - spin_unlock_irqrestore(&mpt_scsih_taskQ_lock, flags); + dtmprintk((MYIOC_s_INFO_FMT "taskmgmt_complete: (=%p) done! Num Failed(%d) Task Count (%d)\n", + ioc->name, mf, hd->numTMrequests, hd->taskQcnt)); #endif + hd->tmPtr = NULL; + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); return 1; } @@ -1930,6 +3577,45 @@ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* + * OS entry point to adjust the queue_depths on a per-device basis. + * Called once per device the bus scan. Use it to force the queue_depth + * member to 1 if a device does not support Q tags. 
+ */ +void +mptscsih_select_queue_depths(struct Scsi_Host *sh, Scsi_Device *sdList) +{ + struct scsi_device *device; + VirtDevice *pTarget; + MPT_SCSI_HOST *hd; + int ii, max; + + for (device = sdList; device; device = device->next) { + + if (device->host != sh) + continue; + + hd = (MPT_SCSI_HOST *) sh->hostdata; + if (!hd) + continue; + + if (hd->Targets) { + if (hd->is_spi) + max = MPT_MAX_SCSI_DEVICES; + else + max = MPT_MAX_FC_DEVICES; + + for (ii=0; ii < max; ii++) { + pTarget = hd->Targets[ii]; + if (pTarget && !(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) { + device->queue_depth = 1; + } + } + } + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* * Private routines... */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1956,6 +3642,7 @@ case REASSIGN_BLOCKS: case PERSISTENT_RESERVE_OUT: case 0xea: + case 0xa3: return 1; /* No data transfer commands */ @@ -1980,7 +3667,7 @@ return 0; case RESERVE_10: - if (cmd->cmnd[1] & 0x03) /* RESERSE:{LongID|Extent} (data out phase)? */ + if (cmd->cmnd[1] & 0x03) /* RESERVE:{LongID|Extent} (data out phase)? */ return 1; else return 0; @@ -2000,16 +3687,27 @@ } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Utility function to copy sense data from the scsi_cmnd buffer + * to the FC and SCSI target structures. 
+ * + */ static void copy_sense_data(Scsi_Cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply) { - MPT_SCSI_DEV *mpt_sdev = NULL; + VirtDevice *target; + SCSIIORequest_t *pReq; u32 sense_count = le32_to_cpu(pScsiReply->SenseCount); - char devFoo[32]; + int index; + char devFoo[96]; IO_Info_t thisIo; - if (sc && sc->device) - mpt_sdev = (MPT_SCSI_DEV*) sc->device->hostdata; + /* Get target structure + */ + pReq = (SCSIIORequest_t *) mf; + index = (int) pReq->TargetID; + target = hd->Targets[index]; + if (hd->is_multipath && sc->device->hostdata) + target = (VirtDevice *) sc->device->hostdata; if (sense_count) { u8 *sense_data; @@ -2017,49 +3715,84 @@ /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * 256)); + sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); - /* Cache SenseData for this SCSI device! */ - if (mpt_sdev) { - memcpy(mpt_sdev->CachedSense.data, sense_data, sense_count); - mpt_sdev->sense_sz = sense_count; + + /* save sense data to the target device + */ + if (target) { + int sz; + + sz = MIN(pReq->SenseBufferLength, sense_count); + if (sz > SCSI_STD_SENSE_BYTES) + sz = SCSI_STD_SENSE_BYTES; + memcpy(target->sense, sense_data, sz); + target->tflags |= MPT_TARGET_FLAGS_VALID_SENSE; } - } else { - dprintk((KERN_INFO MYNAM ": Hmmm... SenseData len=0! (?)\n")); - } + /* Log SMART data (asc = 0x5D, non-IM case only) if required. 
+ */ + if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) { + if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) { + int idx; + MPT_ADAPTER *ioc = hd->ioc; + + idx = ioc->eventContext % ioc->eventLogSize; + ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE; + ioc->events[idx].eventContext = ioc->eventContext; + + ioc->events[idx].data[0] = (pReq->LUN[1] << 24) || + (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) || + (pReq->Bus << 8) || pReq->TargetID; + + ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12]; + + ioc->eventContext++; + } + } - thisIo.cdbPtr = sc->cmnd; - thisIo.sensePtr = sc->sense_buffer; - thisIo.SCSIStatus = pScsiReply->SCSIStatus; - thisIo.DoDisplay = 1; - sprintf(devFoo, "ioc%d,scsi%d:%d", hd->ioc->id, sc->target, sc->lun); - thisIo.DevIDStr = devFoo; + /* Print an error report for the user. + */ + thisIo.cdbPtr = sc->cmnd; + thisIo.sensePtr = sc->sense_buffer; + thisIo.SCSIStatus = pScsiReply->SCSIStatus; + thisIo.DoDisplay = 1; + if (hd->is_multipath) + sprintf(devFoo, "%d:%d:%d \"%s\"", + hd->ioc->id, + pReq->TargetID, + pReq->LUN[1], + target->dev_vol_name); + else + sprintf(devFoo, "%d:%d:%d", hd->ioc->id, sc->target, sc->lun); + thisIo.DevIDStr = devFoo; /* fubar */ - thisIo.dataPtr = NULL; - thisIo.inqPtr = NULL; - if (sc->device) { - thisIo.inqPtr = sc->device->vendor-8; /* FIXME!!! */ + thisIo.dataPtr = NULL; + thisIo.inqPtr = NULL; + if (sc->device) { + thisIo.inqPtr = sc->device->vendor-8; /* FIXME!!! */ + } + (void) mpt_ScsiHost_ErrorReport(&thisIo); + + } else { + dprintk((MYIOC_s_INFO_FMT "Hmmm... SenseData len=0! 
(?)\n", + hd->ioc->name)); } - (void) mpt_ScsiHost_ErrorReport(&thisIo); return; } -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static u32 -SCPNT_TO_MSGCTX(Scsi_Cmnd *sc) +SCPNT_TO_LOOKUP_IDX(Scsi_Cmnd *sc) { MPT_SCSI_HOST *hd; - MPT_FRAME_HDR *mf; int i; hd = (MPT_SCSI_HOST *) sc->host->hostdata; for (i = 0; i < hd->ioc->req_depth; i++) { if (hd->ScsiLookup[i] == sc) { - mf = MPT_INDEX_2_MFPTR(hd->ioc, i); - return mf->u.frame.hwhdr.msgctxu.MsgContext; + return i; } } @@ -2075,18 +3808,262 @@ # include "../../scsi/scsi_module.c" #endif +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Search the pendingQ for a command with specific index. + * If found, delete and return mf pointer + * If not found, return NULL + */ +static MPT_FRAME_HDR * +mptscsih_search_pendingQ(MPT_SCSI_HOST *hd, int scpnt_idx) +{ + unsigned long flags; + MPT_DONE_Q *buffer; + MPT_FRAME_HDR *mf = NULL; + MPT_FRAME_HDR *cmdMfPtr = NULL; + + ddvtprintk((MYIOC_s_INFO_FMT ": search_pendingQ called...", hd->ioc->name)); + cmdMfPtr = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx); + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (!Q_IS_EMPTY(&hd->pendingQ)) { + buffer = hd->pendingQ.head; + do { + mf = (MPT_FRAME_HDR *) buffer->argp; + if (mf == cmdMfPtr) { + Q_DEL_ITEM(buffer); + + /* clear the arg pointer + */ + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + break; + } + mf = NULL; + } while ((buffer = buffer->forw) != (MPT_DONE_Q *) &hd->pendingQ); + } + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + ddvtprintk((" ...return %p\n", mf)); + return mf; +} + +/* Post all commands on the pendingQ to the FW. + * Lock Q when deleting/adding members + * Lock io_request_lock for OS callback. + */ +static void +post_pendingQ_commands(MPT_SCSI_HOST *hd) +{ + MPT_FRAME_HDR *mf; + MPT_DONE_Q *buffer; + unsigned long flags; + + /* Flush the pendingQ. 
+ */ + ddvtprintk((MYIOC_s_INFO_FMT ": post_pendingQ_commands called\n", hd->ioc->name)); + while (1) { + spin_lock_irqsave(&hd->freedoneQlock, flags); + if (Q_IS_EMPTY(&hd->pendingQ)) { + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + break; + } + + buffer = hd->pendingQ.head; + /* Delete from Q + */ + Q_DEL_ITEM(buffer); + + mf = (MPT_FRAME_HDR *) buffer->argp; + if (!mf) { + /* This should never happen */ + printk(MYIOC_s_WARN_FMT "post_pendingQ_commands: mf %p\n", hd->ioc->name, mf); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + continue; + } + + mptscsih_put_msgframe(ScsiDoneCtx, hd->ioc->id, mf); + ddvtprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (mf=%p)\n", + hd->ioc->name, mf)); + + buffer->argp = NULL; + + /* Add to the freeQ + */ + Q_ADD_TAIL(&hd->freeQ.head, buffer, MPT_DONE_Q); + spin_unlock_irqrestore(&hd->freedoneQlock, flags); + } + + return; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { - dprintk((KERN_INFO MYNAM ": IOC %s_reset routed to SCSI host driver!\n", + MPT_SCSI_HOST *hd = NULL; + unsigned long flags; + + dtmprintk((KERN_WARNING MYNAM + ": IOC %s_reset routed to SCSI host driver!\n", reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")); + /* If a FW reload request arrives after base installed but + * before all scsi hosts have been attached, then an alt_ioc + * may have a NULL sh pointer. + */ + if ((ioc->sh == NULL) || (ioc->sh->hostdata == NULL)) + return 0; + else + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (reset_phase == MPT_IOC_PRE_RESET) { - /* FIXME! Do pre-reset cleanup */ + dtmprintk((MYIOC_s_WARN_FMT "Do Pre-Diag Reset handling\n", + ioc->name)); + + /* Clean Up: + * 1. Set Hard Reset Pending Flag + * All new commands go to doneQ + */ + hd->resetPending = 1; + + /* 2. Flush running commands + * Clean drop test code - if compiled + * Clean ScsiLookup (and associated memory) + * AND clean mytaskQ + */ + + /* 2a. 
Drop Test Command. + */ +#ifdef DROP_TEST + { + Scsi_Cmnd *sc; + unsigned long flags; + u16 req_idx; + + /* Free resources for the drop test MF + * and chain buffers. + */ + if (dropMfPtr) { + req_idx = le16_to_cpu(dropMfPtr->u.frame.hwhdr.msgctxu.fld.req_idx); + sc = hd->ScsiLookup[req_idx]; + if (sc == NULL) { + printk(MYIOC_s_ERR_FMT + "Drop Test: NULL ScsiCmd ptr!\n", + ioc->name); + } else { + sc->host_scribble = NULL; + sc->result = DID_RESET << 16; + hd->ScsiLookup[req_idx] = NULL; + atomic_dec(&queue_depth); + spin_lock_irqsave(sc->host->host_lock, flags); + sc->scsi_done(sc); /* Issue callback */ + spin_unlock_irqrestore(sc->host->host_lock, flags); + } + + mptscsih_freeChainBuffers(hd, req_idx); + mpt_free_msg_frame(ScsiDoneCtx, ioc->id, dropMfPtr); + printk(MYIOC_s_INFO_FMT + "Free'd: mf (%p) reqidx (%4x)\n", + hd->ioc->name, dropMfPtr, + req_idx); + } + dropMfPtr = NULL; + } +#endif + + /* 2b. Reply to OS all known outstanding I/O commands. + */ + mptscsih_flush_running_cmds(hd); + + /* 2c. If there was an internal command that + * has not completed, configuration or io request, + * free these resources. + */ + if (hd->cmdPtr) { + del_timer(&hd->timer); + mpt_free_msg_frame(ScsiScanDvCtx, ioc->id, hd->cmdPtr); + atomic_dec(&queue_depth); + } + + /* 2d. If a task management has not completed, + * free resources associated with this request. + */ + if (hd->tmPtr) { + del_timer(&hd->TMtimer); + mpt_free_msg_frame(ScsiTaskCtx, ioc->id, hd->tmPtr); + } + +#ifndef MPT_SCSI_USE_NEW_EH + /* 2e. Delete all commands on taskQ + * Should be superfluous - as this taskQ should + * be empty. + */ + clean_taskQ(hd); +#endif + dtmprintk((MYIOC_s_WARN_FMT "Pre-Reset handling complete.\n", + ioc->name)); + } else { - /* FIXME! Do post-reset cleanup */ + dtmprintk((MYIOC_s_WARN_FMT "Do Post-Diag Reset handling\n", + ioc->name)); + + /* Once a FW reload begins, all new OS commands are + * redirected to the doneQ w/ a reset status. + * Init all control structures. 
+ */ + + /* ScsiLookup initialization + */ + { + int ii; + for (ii=0; ii < hd->ioc->req_depth; ii++) + hd->ScsiLookup[ii] = NULL; + } + + /* 2. Chain Buffer initialization + */ + mptscsih_initChainBuffers(hd, 0); + + /* 3. tmPtr clear + */ + if (hd->tmPtr) { + hd->tmPtr = NULL; + } + + /* 4. Renegotiate to all devices, if SCSI + */ + if (hd->is_spi) + mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM); + + /* 5. Enable new commands to be posted + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + hd->resetPending = 0; + hd->numTMrequests = 0; + + /* 6. If there was an internal command, + * wake this process up. + */ + if (hd->cmdPtr) { + /* + * Wake up the original calling thread + */ + hd->pLocal = &hd->localReply; + hd->pLocal->completion = MPT_SCANDV_DID_RESET; + scandv_wait_done = 1; + wake_up(&scandv_waitq); + hd->cmdPtr = NULL; + } + + /* 7. Flush doneQ + */ + flush_doneQ(hd); + + dtmprintk((MYIOC_s_WARN_FMT "Post-Reset handling complete.\n", + ioc->name)); } return 1; /* currently means nothing really */ @@ -2096,9 +4073,11 @@ static int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { + MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - dprintk((KERN_INFO MYNAM ": MPT event (=%02Xh) routed to SCSI host driver!\n", event)); + dprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", + ioc->name, event)); switch (event) { case MPI_EVENT_UNIT_ATTENTION: /* 03 */ @@ -2125,12 +4104,64 @@ * CHECKME! Falling thru... 
*/ + case MPI_EVENT_INTEGRATED_RAID: /* 0B */ +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION + /* negoNvram set to 0 if DV enabled and to USE_NVRAM if + * if DV disabled + */ + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd->negoNvram == 0) { + ScsiCfgData *pSpi; + Ioc3PhysDisk_t *pPDisk; + int numPDisk; + u8 reason; + u8 physDiskNum; + + reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16; + if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) { + /* New or replaced disk. + * Set DV flag and schedule DV. + */ + pSpi = &ioc->spi_data; + physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24; + if (pSpi->pIocPg3) { + pPDisk = pSpi->pIocPg3->PhysDisk; + numPDisk =pSpi->pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (physDiskNum == pPDisk->PhysDiskNum) { + pSpi->dvStatus[pPDisk->PhysDiskID] = MPT_SCSICFG_NEED_DV; + pSpi->forceDv = MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID)); + break; + } + pPDisk++; + numPDisk--; + } + } + } + } +#endif + +#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) + printk("Raid Event RF: "); + { + u32 *m = (u32 *)pEvReply; + int ii; + int n = (int)pEvReply->MsgLength; + for (ii=6; ii < n; ii++) + printk(" %08x", le32_to_cpu(m[ii])); + printk("\n"); + } +#endif + break; + case MPI_EVENT_NONE: /* 00 */ case MPI_EVENT_LOG_DATA: /* 01 */ case MPI_EVENT_STATE_CHANGE: /* 02 */ case MPI_EVENT_EVENT_CHANGE: /* 0A */ default: - dprintk((KERN_INFO MYNAM ": Ignoring event (=%02Xh)\n", event)); + dprintk((KERN_INFO " Ignoring event (=%02Xh)\n", event)); break; } @@ -2149,7 +4180,6 @@ //extern ASCQ_Table_t *mpt_ASCQ_TablePtr; //extern int mpt_ASCQ_TableSz; -/* Lie! */ #define MYNAM "mptscsih" #endif /* } */ @@ -2327,8 +4357,6 @@ * Sense_Key_Specific() - If Sense_Key_Specific_Valid bit is set, * then print additional information via * a call to SDMS_SystemAlert(). 
- * - * Return: nothing */ static void Sense_Key_Specific(IO_Info_t *ioop, char *msg1) { @@ -2463,25 +4491,27 @@ } else if (ASC == 0x29 && (ASCQ < sizeof(asc_29_ascq_NN_strings)/sizeof(char*)-1)) *s1 = asc_29_ascq_NN_strings[ASCQ]; /* - * else { leave all *s[1-4] values pointing to the empty "" string } + * Else { leave all *s[1-4] values pointing to the empty "" string } */ return *s1; } /* - * Need to check ASC here; if it is "special," then - * the ASCQ is variable, and indicates failed component number. - * We must treat the ASCQ as a "don't care" while searching the - * mptscsih_ASCQ_Table[] by masking it off, and then restoring it later - * on when we actually need to identify the failed component. + * Need to check ASC here; if it is "special," then + * the ASCQ is variable, and indicates failed component number. + * We must treat the ASCQ as a "dont care" while searching the + * mptscsih_ASCQ_Table[] by masking it off, and then restoring it later + * on when we actually need to identify the failed component. */ if (SPECIAL_ASCQ(ASC,ASCQ)) ASCQ = 0xFF; - /* OK, now search mptscsih_ASCQ_Table[] for a matching entry */ + /* OK, now search mptscsih_ASCQ_Table[] for a matching entry */ for (idx = 0; mptscsih_ASCQ_TablePtr && idx < mpt_ASCQ_TableSz; idx++) - if ((ASC == mptscsih_ASCQ_TablePtr[idx].ASC) && (ASCQ == mptscsih_ASCQ_TablePtr[idx].ASCQ)) - return (*s1 = mptscsih_ASCQ_TablePtr[idx].Description); + if ((ASC == mptscsih_ASCQ_TablePtr[idx].ASC) && (ASCQ == mptscsih_ASCQ_TablePtr[idx].ASCQ)) { + *s1 = mptscsih_ASCQ_TablePtr[idx].Description; + return *s1; + } if ((ASC >= 0x80) || (ASCQ >= 0x80)) *s1 = ascq_vendor_uniq; @@ -2523,6 +4553,9 @@ * SPINNING UP (02,04/01), * LOGICAL UNIT NOT SUPPORTED (05,25/00), etc. 
*/ + if (sk == SK_NO_SENSE) { + return 0; + } if ( (sk==SK_UNIT_ATTENTION && asc==0x29 && (ascq==0x00 || ascq==0x01)) || (sk==SK_NOT_READY && asc==0x04 && ascq==0x01) || (sk==SK_ILLEGAL_REQUEST && asc==0x25 && ascq==0x00) @@ -2591,6 +4624,2608 @@ PrintF(("%s\n", foo)); return l; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_initTarget - Target, LUN alloc/free functionality. + * @hd: Pointer to MPT_SCSI_HOST structure + * @bus_id: Bus number (?) + * @target_id: SCSI target id + * @lun: SCSI LUN id + * @data: Pointer to data + * @dlen: Number of INQUIRY bytes + * + * NOTE: It's only SAFE to call this routine if data points to + * sane & valid STANDARD INQUIRY data! + * + * Allocate and initialize memory for this target. + * Save inquiry data. + * + * Returns pointer to VirtDevice structure. + */ +static VirtDevice * +mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen) +{ + VirtDevice *vdev; + int sz; + + dprintk((MYIOC_s_INFO_FMT "initTarget (%d,%d,%d) called, hd=%p\n", + hd->ioc->name, bus_id, target_id, lun, hd)); + + if ((vdev = hd->Targets[target_id]) == NULL) { + if ((vdev = kmalloc(sizeof(VirtDevice), GFP_ATOMIC)) == NULL) { + printk(MYIOC_s_ERR_FMT "initTarget kmalloc(%d) FAILED!\n", + hd->ioc->name, (int)sizeof(VirtDevice)); + } else { + memset(vdev, 0, sizeof(VirtDevice)); + rwlock_init(&vdev->VdevLock); + Q_INIT(&vdev->WaitQ, void); + Q_INIT(&vdev->SentQ, void); + Q_INIT(&vdev->DoneQ, void); + vdev->tflags = 0; + vdev->ioc_id = hd->ioc->id; + vdev->target_id = target_id; + vdev->bus_id = bus_id; + + hd->Targets[target_id] = vdev; + dprintk((KERN_INFO " *NEW* Target structure (id %d) @ %p\n", + target_id, vdev)); + } + } + + if (vdev && data) { + if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) { + + /* Copy the inquiry data - if we haven't yet. 
+ */ + sz = MIN(dlen, SCSI_STD_INQUIRY_BYTES); + + memcpy (vdev->inq_data, data, sz); + vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY; + + /* Update the target capabilities + */ + mptscsih_setTargetNegoParms(hd, vdev); + } + + /* Is LUN supported? If so, upper 3 bits will be 0 + * in first byte of inquiry data. + */ + if ((*data & 0xe0) == 0) + vdev->luns |= (1 << lun); + } + + if (vdev) { + if (hd->ioc->spi_data.isRaid & (1 << target_id)) + vdev->raidVolume = 1; + else + vdev->raidVolume = 0; + } + + dprintk((KERN_INFO " target = %p\n", vdev)); + return vdev; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Update the target negotiation parameters based on the + * the Inquiry data, adapter capabilities, and NVRAM settings. + * + */ +void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target) +{ + int id = (int) target->target_id; + int nvram; + char canQ = 0; + u8 width = MPT_NARROW; + u8 factor = MPT_ASYNC; + u8 offset = 0; + u8 version, nfactor; + ScsiCfgData *pspi_data = &hd->ioc->spi_data; + + /* Set flags based on Inquiry data + */ + if (target->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) { + version = target->inq_data[2] & 0x03; + if (version < 2) { + width = 0; + factor = MPT_ULTRA2; + offset = pspi_data->maxSyncOffset; + } else { + if (target->inq_data[7] & 0x20) { + width = 1; + } + + if (target->inq_data[7] & 0x10) { + if (version == 2) + factor = MPT_ULTRA2; + else + factor = MPT_ULTRA320; + + offset = pspi_data->maxSyncOffset; + } else { + factor = MPT_ASYNC; + offset = 0; + } + } + + if (target->inq_data[7] & 0x02) { + canQ = 1; + } + + /* Update tflags based on NVRAM settings. (SCSI only) + */ + if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { + nvram = pspi_data->nvram[id]; + nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8; + + if (width) + width = nvram & MPT_NVRAM_WIDE_DISABLE ? 
0 : 1; + + if (offset > 0) { + /* Ensure factor is set to the + * maximum of: adapter, nvram, inquiry + */ + if (nfactor) { + if (nfactor < pspi_data->minSyncFactor ) + nfactor = pspi_data->minSyncFactor; + + factor = MAX (factor, nfactor); + if (factor == MPT_ASYNC) + offset = 0; + } else { + offset = 0; + factor = MPT_ASYNC; + } + } else { + factor = MPT_ASYNC; + } + } + + /* Make sure data is consistent + */ + if ((!width) && (factor < MPT_ULTRA2)) { + factor = MPT_ULTRA2; + } + + /* Save the data to the target structure. + */ + target->minSyncFactor = factor; + target->maxOffset = offset; + target->maxWidth = width; + if (canQ) { + target->tflags |= MPT_TARGET_FLAGS_Q_YES; + } + + target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO; + + /* Disable all wide (sync) extended messages + * if device is narrow (async). + */ + target->negoFlags = 0; + if (!width) + target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Clear sense valid flag. + */ +static void clear_sense_flag(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq) +{ + VirtDevice *target; + int index = (int) pReq->TargetID; + + if ((target = hd->Targets[index])) { + target->tflags &= ~MPT_TARGET_FLAGS_VALID_SENSE; + } + + return; +} + +/* + * If DV disabled (negoNvram set to USE_NVARM) or if not LUN 0, return. + * Else set the NEED_DV flag after Read Capacity Issued (disks) + * or Mode Sense (cdroms). Tapes, key off of Inquiry command. 
+ */ +static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq, char *data) +{ + u8 cmd = pReq->CDB[0]; + + if (pReq->LUN[1] != 0) + return; + + if (hd->negoNvram != 0) + return; + + if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE) || + ((cmd == INQUIRY) && ((data[0] & 0x1F) == 0x01))) { + u8 dvStatus = hd->ioc->spi_data.dvStatus[pReq->TargetID]; + if (!(dvStatus & MPT_SCSICFG_DV_DONE)) { + ScsiCfgData *pSpi = &hd->ioc->spi_data; + if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) { + /* Set NEED_DV for all hidden disks + */ + Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk; + int numPDisk = pSpi->pIocPg3->NumPhysDisks; + + while (numPDisk) { + pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID)); + pPDisk++; + numPDisk--; + } + } + pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV; + ddvtprintk(("NEED_DV set for visible disk id %d\n", + pReq->TargetID)); + }; + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * SCSI Config Page functionality ... + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_setDevicePage1Flags - add Requested and Configuration fields flags + * based on width, factor and offset parameters. + * @width: bus width + * @factor: sync factor + * @offset: sync offset + * @requestedPtr: pointer to requested values (updated) + * @configurationPtr: pointer to configuration values (updated) + * @flags: flags to block WDTR or SDTR negotiation + * + * Return: None. + * + * Remark: Called by writeSDP1 and _dv_params + */ +static void +mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags) +{ + u8 nowide = flags & MPT_TARGET_NO_NEGO_WIDE; + u8 nosync = flags & MPT_TARGET_NO_NEGO_SYNC; + + *configurationPtr = 0; + *requestedPtr = width ? 
MPI_SCSIDEVPAGE1_RP_WIDE : 0; + *requestedPtr |= (offset << 16) | (factor << 8); + + if (width && offset && !nowide && !nosync) { + if (factor < MPT_ULTRA160) { + *requestedPtr |= (MPI_SCSIDEVPAGE1_RP_IU + MPI_SCSIDEVPAGE1_RP_DT + + MPI_SCSIDEVPAGE1_RP_QAS); + } else if (factor < MPT_ULTRA2) { + *requestedPtr |= MPI_SCSIDEVPAGE1_RP_DT; + } + } + + if (nowide) + *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED; + + if (nosync) + *configurationPtr |= MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED; + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_writeSDP1 - write SCSI Device Page 1 + * @hd: Pointer to a SCSI Host Strucutre + * @portnum: IOC port number + * @target_id: writeSDP1 for single ID + * @flags: MPT_SCSICFG_ALL_IDS, MPT_SCSICFG_USE_NVRAM + * + * Return: -EFAULT if read of config page header fails + * or 0 if success. + * + * Remark: If a target has been found, the settings from the + * target structure are used, else the device is set + * to async/narrow. + * + * Remark: Called during init and after a FW reload. + * Remark: We do not wait for a return, write pages sequentially. 
+ */ +static int +mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags) +{ + MPT_ADAPTER *ioc = hd->ioc; + Config_t *pReq = NULL; + SCSIDevicePage1_t *pData = NULL; + VirtDevice *pTarget = NULL; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + dma_addr_t dataDma; + u16 req_idx; + u32 frameOffset; + u32 requested, configuration, flagsLength; + int ii, nvram; + int id = 0, maxid = 0; + u8 width; + u8 factor; + u8 offset; + u8 bus = 0; + u8 negoFlags; + + if (ioc->spi_data.sdp1length == 0) + return 0; + + if (flags & MPT_SCSICFG_ALL_IDS) { + id = 0; + maxid = ioc->sh->max_id - 1; + } else if (ioc->sh) { + id = target_id; + maxid = MIN(id, ioc->sh->max_id - 1); + } + + for (; id <= maxid; id++) { + if (id == ioc->pfacts[portnum].PortSCSIID) + continue; + + if (flags & MPT_SCSICFG_USE_NVRAM) { + /* Use NVRAM, adapter maximums and target settings. + * Data over-riden by target structure information, if present + */ + width = ioc->spi_data.maxBusWidth; + offset = ioc->spi_data.maxSyncOffset; + factor = ioc->spi_data.minSyncFactor; + if (ioc->spi_data.nvram && (ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + nvram = ioc->spi_data.nvram[id]; + + if (width) + width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; + + if (offset > 0) { + factor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8; + if (factor == 0) { + /* Key for async */ + factor = MPT_ASYNC; + offset = 0; + } else if (factor < ioc->spi_data.minSyncFactor) { + factor = ioc->spi_data.minSyncFactor; + } + } else + factor = MPT_ASYNC; + } + + /* Set the negotiation flags. + */ + negoFlags = 0; + if (!width) + negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } else { + width = 0; + factor = MPT_ASYNC; + offset = 0; + negoFlags = MPT_TARGET_NO_NEGO_SYNC; + } + + /* If id is not a raid volume, get the updated + * transmission settings from the target structure. 
+ */ + if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { + width = pTarget->maxWidth; + factor = pTarget->minSyncFactor; + offset = pTarget->maxOffset; + negoFlags = pTarget->negoFlags; + pTarget = NULL; + } + mptscsih_setDevicePage1Flags(width, factor, offset, + &requested, &configuration, negoFlags); + + + if (negoFlags == (MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC)) + continue; + + /* Get a MF for this command. + */ + if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc->id)) == NULL) { + dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n", + ioc->name)); + return -EAGAIN; + } + + /* Set the request and the data pointers. + * Request takes: 36 bytes (32 bit SGE) + * SCSI Device Page 1 requires 16 bytes + * 40 + 16 <= size of SCSI IO Request = 56 bytes + * and MF size >= 64 bytes. + * Place data at end of MF. + */ + pReq = (Config_t *)mf; + + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + frameOffset = ioc->req_sz - sizeof(SCSIDevicePage1_t); + + pData = (SCSIDevicePage1_t *)((u8 *) mf + frameOffset); + dataDma = ioc->req_frames_dma + (req_idx * ioc->req_sz) + frameOffset; + + /* Complete the request frame (same for all requests). + */ + pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + pReq->Reserved = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_CONFIG; + pReq->Reserved1[0] = 0; + pReq->Reserved1[1] = 0; + pReq->Reserved1[2] = 0; + pReq->MsgFlags = 0; + for (ii=0; ii < 8; ii++) { + pReq->Reserved2[ii] = 0; + } + pReq->Header.PageVersion = ioc->spi_data.sdp1version; + pReq->Header.PageLength = ioc->spi_data.sdp1length; + pReq->Header.PageNumber = 1; + pReq->Header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pReq->PageAddress = cpu_to_le32(id | (bus << 8 )); + + /* Add a SGE to the config request. 
+ */ + flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | ioc->spi_data.sdp1length * 4; + + psge = (MptSge_t *) &pReq->PageBufferSGE; + psge->FlagsLength = cpu_to_le32(flagsLength); + cpu_to_leXX(dataDma, psge->Address); + + /* Set up the common data portion + */ + pData->Header.PageVersion = pReq->Header.PageVersion; + pData->Header.PageLength = pReq->Header.PageLength; + pData->Header.PageNumber = pReq->Header.PageNumber; + pData->Header.PageType = pReq->Header.PageType; + pData->RequestedParameters = cpu_to_le32(requested); + pData->Reserved = 0; + pData->Configuration = cpu_to_le32(configuration); + + dprintk((MYIOC_s_INFO_FMT + "write SDP1: id %d pgaddr 0x%x req 0x%x config 0x%x\n", + ioc->name, id, (id | (bus<<8)), + requested, configuration)); + + mptscsih_put_msgframe(ScsiDoneCtx, ioc->id, mf); + } + + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_taskmgmt_timeout - Call back for timeout on a + * task management request. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + * + */ +static void mptscsih_taskmgmt_timeout(unsigned long data) +{ + MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data; + + dprintk((MYIOC_s_WARN_FMT "TM request timed out!\n", hd->ioc->name)); + /* Delete the timer that triggered this callback. + * Remark: del_timer checks to make sure timer is active + * before deleting. + */ + del_timer(&hd->TMtimer); + + /* Call the reset handler. Already had a TM request + * timeout - so issue a diagnostic reset + */ + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk((KERN_WARNING " Firmware Reload FAILED!!\n")); + } + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * Bus Scan and Domain Validation functionality ... + */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * mptscsih_scandv_complete - Scan and DV callback routine registered + * to Fustion MPT (base) driver. 
+ * + * @ioc: Pointer to MPT_ADAPTER structure + * @mf: Pointer to original MPT request frame + * @mr: Pointer to MPT reply frame (NULL if TurboReply) + * + * This routine is called from mpt.c::mpt_interrupt() at the completion + * of any SCSI IO request. + * This routine is registered with the Fusion MPT (base) driver at driver + * load/init time via the mpt_register() API call. + * + * Returns 1 indicating alloc'd request frame ptr should be freed. + * + * Remark: Sets a completion code and (possibly) saves sense data + * in the IOC member localReply structure. + * Used ONLY for bus scan, DV and other internal commands. + */ +static int +mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +{ + MPT_SCSI_HOST *hd; + SCSIIORequest_t *pReq; + int completionCode; + u16 req_idx; + + if ((mf == NULL) || + (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { + printk(MYIOC_s_ERR_FMT + "ScanDvComplete, %s req frame ptr! (=%p)\n", + ioc->name, mf?"BAD":"NULL", mf); + goto wakeup; + } + + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + del_timer(&hd->timer); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + hd->ScsiLookup[req_idx] = NULL; + pReq = (SCSIIORequest_t *) mf; + + if (mf != hd->cmdPtr) { + printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p)\n", + hd->ioc->name, mf, hd->cmdPtr); + } + hd->cmdPtr = NULL; + + ddvprintk((MYIOC_s_INFO_FMT "ScanDvComplete (mf=%p,mr=%p)\n", + hd->ioc->name, mf, mr)); + + atomic_dec(&queue_depth); + + hd->pLocal = &hd->localReply; + + /* If target struct exists, clear sense valid flag. 
+ */ + clear_sense_flag(hd, pReq); + + if (mr == NULL) { + completionCode = MPT_SCANDV_GOOD; + } else { + SCSIIOReply_t *pReply; + u16 status; + + pReply = (SCSIIOReply_t *) mr; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + + ddvprintk((KERN_NOTICE " IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh, IOCLogInfo=%08xh\n", + status, pReply->SCSIState, pReply->SCSIStatus, + le32_to_cpu(pReply->IOCLogInfo))); + + switch(status) { + + case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ + completionCode = MPT_SCANDV_SELECTION_TIMEOUT; + break; + + case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ + completionCode = MPT_SCANDV_DID_RESET; + break; + + case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ + case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ + if (pReply->Function == MPI_FUNCTION_CONFIG) { + ConfigReply_t *pr = (ConfigReply_t *)mr; + completionCode = MPT_SCANDV_GOOD; + hd->pLocal->header.PageVersion = pr->Header.PageVersion; + hd->pLocal->header.PageLength = pr->Header.PageLength; + hd->pLocal->header.PageNumber = pr->Header.PageNumber; + hd->pLocal->header.PageType = pr->Header.PageType; + + } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) { + /* If the RAID Volume request is successful, + * return GOOD, else indicate that + * some type of error occurred. 
+ */ + MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; + if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS) + completionCode = MPT_SCANDV_GOOD; + else + completionCode = MPT_SCANDV_SOME_ERROR; + + } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { + VirtDevice *target; + u8 *sense_data; + int sz; + + /* save sense data in global & target structure + */ + completionCode = MPT_SCANDV_SENSE; + hd->pLocal->scsiStatus = pReply->SCSIStatus; + sense_data = ((u8 *)hd->ioc->sense_buf_pool + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + sz = MIN (pReq->SenseBufferLength, + SCSI_STD_SENSE_BYTES); + memcpy(hd->pLocal->sense, sense_data, sz); + + target = hd->Targets[pReq->TargetID]; + if (target) { + memcpy(target->sense, sense_data, sz); + target->tflags + |= MPT_TARGET_FLAGS_VALID_SENSE; + } + + ddvprintk((KERN_NOTICE " Check Condition, sense ptr %p\n", + sense_data)); + } else if (pReply->SCSIState & (MPI_SCSI_STATE_AUTOSENSE_FAILED | + MPI_SCSI_STATE_NO_SCSI_STATUS)) { + completionCode = MPT_SCANDV_DID_RESET; + } else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) { + completionCode = MPT_SCANDV_DID_RESET; + } else { + /* If no error, this will be equivalent + * to MPT_SCANDV_GOOD + */ + completionCode = (int) pReply->SCSIStatus; + } + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completionCode = MPT_SCANDV_DID_RESET; + else + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + default: + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + } /* switch(status) */ + + ddvprintk((KERN_NOTICE " completionCode set to %08xh\n", + completionCode)); + } /* end of address reply case */ + + hd->pLocal->completion = completionCode; + + /* MF and RF are freed in mpt_interrupt + */ +wakeup: + /* Free Chain buffers (will never chain) in scan or dv */ + //mptscsih_freeChainBuffers(hd, req_idx); + + /* + * Wake up the original calling thread + */ + scandv_wait_done = 1; + 
wake_up(&scandv_waitq); + + return 1; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_timer_expired - Call back for timer process. + * Used only for dv functionality. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + * + */ +static void mptscsih_timer_expired(unsigned long data) +{ + MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data; +#ifndef MPT_SCSI_USE_NEW_EH + unsigned long flags; +#endif + + + ddvprintk((MYIOC_s_WARN_FMT "Timer Expired! Cmd %p\n", hd->ioc->name, hd->cmdPtr)); + + if (hd->cmdPtr) { + MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr; + + if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) { + /* Desire to issue a task management request here. + * TM requests MUST be single threaded. + * If old eh code and no TM current, issue request. + * If new eh code, do nothing. Wait for OS cmd timeout + * for bus reset. + */ +#ifndef MPT_SCSI_USE_NEW_EH + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + if (hd->tmPending) { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + return; + } else + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + + if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + 0, 0, 0, NO_SLEEP) < 0) { + printk(MYIOC_s_WARN_FMT "TM FAILED!\n", hd->ioc->name); + } +#else + ddvtprintk((MYIOC_s_NOTE_FMT "DV Cmd Timeout: NoOp\n", hd->ioc->name)); +#endif + } else { + /* Perform a FW reload */ + if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) { + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", hd->ioc->name); + } + } + } else { + /* This should NEVER happen */ + printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", hd->ioc->name); + } + + /* No more processing. + * TM call will generate an interrupt for SCSI TM Management. + * The FW will reply to all outstanding commands, callback will finish cleanup. + * Hard reset clean-up will free all resources. 
+ */ + ddvprintk((MYIOC_s_WARN_FMT "Timer Expired Complete!\n", hd->ioc->name)); + + return; +} + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_do_raid - Format and Issue a RAID volume request message. + * @hd: Pointer to scsi host structure + * @action: What do be done. + * @id: Logical target id. + * @bus: Target locations bus. + * + * Returns: < 0 on a fatal error + * 0 on success + * + * Remark: Wait to return until reply processed by the ISR. + */ +static int +mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io) +{ + MpiRaidActionRequest_t *pReq; + MPT_FRAME_HDR *mf; + MptSge_t *psge; + int flagsLength; + int in_isr; + + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Internal raid request not allowed in ISR context!\n", + hd->ioc->name)); + return -EPERM; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc->id)) == NULL) { + ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n", + hd->ioc->name)); + return -EAGAIN; + } + pReq = (MpiRaidActionRequest_t *)mf; + pReq->Action = action; + pReq->Reserved1 = 0; + pReq->ChainOffset = 0; + pReq->Function = MPI_FUNCTION_RAID_ACTION; + pReq->VolumeID = io->id; + pReq->VolumeBus = io->bus; + pReq->PhysDiskNum = io->physDiskNum; + pReq->MsgFlags = 0; + pReq->Reserved2 = 0; + pReq->ActionDataWord = 0; /* Reserved for this action */ + //pReq->ActionDataSGE = 0; + + psge = (MptSge_t *) &pReq->ActionDataSGE; + + /* Add a SGE to the config request. 
+ */ + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 0; + + psge->FlagsLength = cpu_to_le32(flagsLength); + cpu_to_leXX( (dma_addr_t) -1, psge->Address); + + ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n", + hd->ioc->name, action, io->id)); + + hd->pLocal = NULL; + hd->timer.expires = jiffies + HZ*2; /* 2 second timeout */ + scandv_wait_done = 0; + + /* Save cmd pointer, for resource free if timeout or + * FW reload occurs + */ + hd->cmdPtr = mf; + + add_timer(&hd->timer); + mptscsih_put_msgframe(ScsiScanDvCtx, hd->ioc->id, mf); + wait_event(scandv_waitq, scandv_wait_done); + + if ((hd->pLocal == NULL) || (hd->pLocal->completion != MPT_SCANDV_GOOD)) + return -1; + + return 0; +} +#endif /* ~MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_do_cmd - Do internal command. + * @hd: MPT_SCSI_HOST pointer + * @io: INTERNAL_CMD pointer. + * + * Issue the specified internally generated command and do command + * specific cleanup. For bus scan / DV only. + * NOTES: If command is Inquiry and status is good, + * initialize a target structure, save the data + * + * Remark: Single threaded access only. + * + * Return: + * < 0 if an illegal command or no resources + * + * 0 if good + * + * > 0 if command complete but some type of completion error. 
+ */ +static int +mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) +{ + MPT_FRAME_HDR *mf; + MptSge_t *mpisge; + SCSIIORequest_t *pScsiReq; + SCSIIORequest_t ReqCopy; + int my_idx, ii, dir; + int rc, cmdTimeout; + int in_isr; + char cmdLen; + char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + char cmd = io->cmd; + + in_isr = in_interrupt(); + if (in_isr) { + dprintk((MYIOC_s_WARN_FMT "Internal SCSI IO request not allowed in ISR context!\n", + hd->ioc->name)); + return -EPERM; + } + + + /* Set command specific information + */ + switch (cmd) { + case CMD_Inquiry: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + CDB[4] = io->size; + cmdTimeout = 10; + break; + + case CMD_TestUnitReady: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + cmdTimeout = 10; + break; + + case CMD_StartStopUnit: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + CDB[4] = 1; /*Spin up the disk */ + cmdTimeout = 15; + break; + + case CMD_ReadBuffer: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + if (io->flags & MPT_ICFLAG_ECHO) { + CDB[1] = 0x0A; + } else { + CDB[1] = 0x02; + } + + if (io->flags & MPT_ICFLAG_BUF_CAP) { + CDB[1] |= 0x01; + } + CDB[6] = (io->size >> 16) & 0xFF; + CDB[7] = (io->size >> 8) & 0xFF; + CDB[8] = io->size & 0xFF; + cmdTimeout = 10; + break; + + case CMD_WriteBuffer: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_WRITE; + CDB[0] = cmd; + if (io->flags & MPT_ICFLAG_ECHO) { + CDB[1] = 0x0A; + } else { + CDB[1] = 0x02; + } + CDB[6] = (io->size >> 16) & 0xFF; + CDB[7] = (io->size >> 8) & 0xFF; + CDB[8] = io->size & 0xFF; + cmdTimeout = 10; + break; + + case CMD_Reserve6: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + cmdTimeout = 10; + break; + + case CMD_Release6: + cmdLen = 6; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; + cmdTimeout = 10; + break; + + case CMD_SynchronizeCache: + cmdLen = 10; + dir = MPI_SCSIIO_CONTROL_READ; + CDB[0] = cmd; +// CDB[1] = 0x02; /* set immediate bit */ + cmdTimeout = 10; + 
break; + + default: + /* Error Case */ + return -EFAULT; + } + + /* Get and Populate a free Frame + */ + if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc->id)) == NULL) { + ddvprintk((MYIOC_s_WARN_FMT "No msg frames!\n", + hd->ioc->name)); + return -EBUSY; + } + + pScsiReq = (SCSIIORequest_t *) mf; + + /* Get the request index */ + my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + ADD_INDEX_LOG(my_idx); /* for debug */ + + if (io->flags & MPT_ICFLAG_PHYS_DISK) { + pScsiReq->TargetID = io->physDiskNum; + pScsiReq->Bus = 0; + pScsiReq->ChainOffset = 0; + pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + } else { + pScsiReq->TargetID = io->id; + pScsiReq->Bus = io->bus; + pScsiReq->ChainOffset = 0; + pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; + } + + pScsiReq->CDBLength = cmdLen; + pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; + + pScsiReq->Reserved = 0; + + pScsiReq->MsgFlags = MPT_SCSIIO_MSG_FLAGS; + /* MsgContext set in mpt_get_msg_fram call */ + + for (ii=0; ii < 8; ii++) + pScsiReq->LUN[ii] = 0; + pScsiReq->LUN[1] = io->lun; + + if (io->flags & MPT_ICFLAG_TAGGED_CMD) + pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ); + else + pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED); + + for (ii=0; ii < 16; ii++) + pScsiReq->CDB[ii] = CDB[ii]; + + pScsiReq->DataLength = cpu_to_le32(io->size); + pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma + + (my_idx * MPT_SENSE_BUFFER_ALLOC)); + + ddvprintk((MYIOC_s_INFO_FMT "Sending Command 0x%x for (%d:%d:%d)\n", + hd->ioc->name, cmd, io->bus, io->id, io->lun)); + + /* 32 bit SG only */ + mpisge = (MptSge_t *) &pScsiReq->SGL; + + if (dir == MPI_SCSIIO_CONTROL_READ) { + mpisge->FlagsLength = cpu_to_le32( + MPT_SGE_FLAGS_SSIMPLE_READ | io->size); + } else { + mpisge->FlagsLength = cpu_to_le32( + MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size); + } + + /* data_dma defaults to -1 + */ + cpu_to_leXX(io->data_dma, mpisge->Address); + + /* The ISR 
will free the request frame, but we need + * the information to initialize the target. Duplicate. + */ + memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t)); + + /* Issue this command after: + * finish init + * add timer + * Wait until the reply has been received + * ScsiScanDvCtx callback function will + * set hd->pLocal; + * set scandv_wait_done and call wake_up + */ + hd->pLocal = NULL; + hd->timer.expires = jiffies + HZ*cmdTimeout; + scandv_wait_done = 0; + + /* Save cmd pointer, for resource free if timeout or + * FW reload occurs + */ + hd->cmdPtr = mf; + + add_timer(&hd->timer); + mptscsih_put_msgframe(ScsiScanDvCtx, hd->ioc->id, mf); + wait_event(scandv_waitq, scandv_wait_done); + + if (hd->pLocal) { + rc = hd->pLocal->completion; + hd->pLocal->skip = 0; + + /* Always set fatal error codes in some cases. + */ + if (rc == MPT_SCANDV_SELECTION_TIMEOUT) + rc = -ENXIO; + else if (rc == MPT_SCANDV_SOME_ERROR) + rc = -rc; + } else { + rc = -EFAULT; + /* This should never happen. */ + ddvprintk((MYIOC_s_INFO_FMT "_do_cmd: Null pLocal!!!\n", + hd->ioc->name)); + } + + return rc; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks. + * @hd: Pointer to MPT_SCSI_HOST structure + * @portnum: IOC port number + * + * Uses the ISR, but with special processing. + * MUST be single-threaded. 
+ * + * Return: 0 on completion + */ +static int +mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum) +{ + MPT_ADAPTER *ioc= hd->ioc; + VirtDevice *pTarget = NULL; + SCSIDevicePage1_t *pcfg1Data = NULL; + INTERNAL_CMD iocmd; + CONFIGPARMS cfg; + dma_addr_t cfg1_dma_addr = -1; + ConfigPageHeader_t header1; + int bus = 0; + int id = 0; + int lun = 0; + int hostId = ioc->pfacts[portnum].PortSCSIID; + int max_id; + int requested, configuration, data; + int doConfig = 0; + u8 flags, factor; + + max_id = ioc->sh->max_id - 1; + + /* Following parameters will not change + * in this routine. + */ + iocmd.cmd = CMD_SynchronizeCache; + iocmd.flags = 0; + iocmd.physDiskNum = -1; + iocmd.data = NULL; + iocmd.data_dma = -1; + iocmd.size = 0; + iocmd.rsvd = iocmd.rsvd2 = 0; + + /* No SCSI hosts + */ + if (hd->Targets == NULL) + return 0; + + /* Skip the host + */ + if (id == hostId) + id++; + + /* Write SDP1 for all SCSI devices + * Alloc memory and set up config buffer + */ + if (hd->is_spi) { + if (ioc->spi_data.sdp1length > 0) { + pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev, + ioc->spi_data.sdp1length * 4, &cfg1_dma_addr); + + if (pcfg1Data != NULL) { + doConfig = 1; + header1.PageVersion = ioc->spi_data.sdp1version; + header1.PageLength = ioc->spi_data.sdp1length; + header1.PageNumber = 1; + header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + cfg.timeout = 0; + } + } + } + + /* loop through all devices on this port + */ + while (bus < MPT_MAX_BUS) { + iocmd.bus = bus; + iocmd.id = id; + pTarget = hd->Targets[(int)id]; + + if (doConfig) { + + /* Set the negotiation flags */ + if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) { + flags = pTarget->negoFlags; + } else { + flags = 0; + if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + data = hd->ioc->spi_data.nvram[id]; + 
+ if (data & MPT_NVRAM_WIDE_DISABLE) + flags |= MPT_TARGET_NO_NEGO_WIDE; + + factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; + if ((factor == 0) || (factor == MPT_ASYNC)) + flags |= MPT_TARGET_NO_NEGO_SYNC; + } + } + + /* Force to async, narrow */ + mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested, + &configuration, flags); + pcfg1Data->RequestedParameters = le32_to_cpu(requested); + pcfg1Data->Reserved = 0; + pcfg1Data->Configuration = le32_to_cpu(configuration); + cfg.pageAddr = (bus<<8) | id; + mpt_config(hd->ioc, &cfg); + } + + /* If target Ptr NULL or if this target is NOT a disk, skip. + */ + // if (pTarget && ((pTarget->inq_data[0] & 0x1F) == 0)) { + if (pTarget) { + for (lun=0; lun <= MPT_LAST_LUN; lun++) { + /* If LUN present, issue the command + */ + if (pTarget->luns & (1< max_id) { + id = 0; + bus++; + } + } + + if (pcfg1Data) { + pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr); + } + + return 0; +} + +#ifndef MPTSCSIH_DISABLE_DOMAIN_VALIDATION +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_domainValidation - Top level handler for domain validation. + * @hd: Pointer to MPT_SCSI_HOST structure. + * + * Uses the ISR, but with special processing. + * Called from schedule, should not be in interrupt mode. + * While thread alive, do dv for all devices needing dv + * + * Return: None. + */ +static void +mptscsih_domainValidation(void *arg) +{ + MPT_SCSI_HOST *hd = NULL; + MPT_ADAPTER *ioc = NULL; + unsigned long flags; + int id, maxid, dvStatus, did; + int ii, isPhysDisk; + + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_active = 1; + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + /* For this ioc, loop through all devices and do dv to each device. 
+ * When complete with this ioc, search through the ioc list, and + * for each scsi ioc found, do dv for all devices. Exit when no + * device needs dv. + */ + did = 1; + while (did) { + did = 0; + for (ioc = mpt_adapter_find_first(); ioc != NULL; ioc = mpt_adapter_find_next(ioc)) { + spin_lock_irqsave(&dvtaskQ_lock, flags); + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + + /* DV only to SCSI adapters */ + if ((int)ioc->chip_type <= (int)FC929) + continue; + + /* Make sure everything looks ok */ + if (ioc->sh == NULL) + continue; + + hd = (MPT_SCSI_HOST *) ioc->sh->hostdata; + if (hd == NULL) + continue; + + maxid = MIN (ioc->sh->max_id, MPT_MAX_SCSI_DEVICES); + + for (id = 0; id < maxid; id++) { + spin_lock_irqsave(&dvtaskQ_lock, flags); + if (dvtaskQ_release) { + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + return; + } + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + dvStatus = hd->ioc->spi_data.dvStatus[id]; + + if (dvStatus & MPT_SCSICFG_NEED_DV) { + + hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_PENDING; + hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_NEED_DV; + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/4); + + /* If hidden phys disk, block IO's to all + * raid volumes + * else, process normally + */ + isPhysDisk = 0; + if (ioc->spi_data.pIocPg3) { + /* Search IOC page 3 to determine if + * this is hidden physical disk + */ + Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; + int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (pPDisk->PhysDiskID == id) { + isPhysDisk = 1; + break; + } + pPDisk++; + numPDisk--; + } + } + + if (isPhysDisk) { + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + if (hd->ioc->spi_data.isRaid & (1 << ii)) { + hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING; + } + 
} + } + + mptscsih_doDv(hd, 0, id); + did++; + hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_DONE; + hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_PENDING; + + if (isPhysDisk) { + for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { + if (hd->ioc->spi_data.isRaid & (1 << ii)) { + hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING; + } + } + } + + /* Post OS IOs that were pended while + * DV running. + */ + post_pendingQ_commands(hd); + } + } + } + } + + spin_lock_irqsave(&dvtaskQ_lock, flags); + dvtaskQ_active = 0; + spin_unlock_irqrestore(&dvtaskQ_lock, flags); + + return; +} + + +#define MPT_GET_NVRAM_VALS 0x01 +#define MPT_UPDATE_MAX 0x02 +#define MPT_SET_MAX 0x04 +#define MPT_SET_MIN 0x08 +#define MPT_FALLBACK 0x10 +#define MPT_SAVE 0x20 + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_doDv - Perform domain validation to a target. + * @hd: Pointer to MPT_SCSI_HOST structure. + * @portnum: IOC port number. + * @target: Physical ID of this target + * + * Uses the ISR, but with special processing. + * MUST be single-threaded. + * Test will exit if target is at async & narrow. + * + * Return: None. 
+ */ +static void +mptscsih_doDv(MPT_SCSI_HOST *hd, int portnum, int id) +{ + MPT_ADAPTER *ioc = hd->ioc; + VirtDevice *pTarget = NULL; + u8 *pbuf1 = NULL; + u8 *pbuf2 = NULL; + dma_addr_t buf1_dma = -1; + dma_addr_t buf2_dma = -1; + ConfigPageHeader_t header1; + SCSIDevicePage1_t *pcfg1Data = NULL; + dma_addr_t cfg1_dma_addr = -1; + ConfigPageHeader_t header0; + SCSIDevicePage0_t *pcfg0Data = NULL; + dma_addr_t cfg0_dma_addr = -1; + DVPARAMETERS dv; + INTERNAL_CMD iocmd; + CONFIGPARMS cfg; + int rc, sz = 0; + int bufsize = 0; + int dataBufSize = 0; + int echoBufSize = 0; + int notDone; + int patt; + int repeat; + char firstPass = 1; + char doFallback = 0; + char readPage0; + char bus, lun; + + if (ioc->spi_data.sdp1length == 0) + return; + + if (ioc->spi_data.sdp0length == 0) + return; + + if (id == ioc->pfacts[portnum].PortSCSIID) + return; + + lun = 0; + bus = 0; + ddvtprintk((MYIOC_s_NOTE_FMT + "DV started: numIOs %d bus=%d, id %d dv @ %p\n", + ioc->name, atomic_read(&queue_depth), bus, id, &dv)); + + /* Prep DV structure + */ + memset (&dv, 0, sizeof(DVPARAMETERS)); + dv.id = id; + + /* Populate tmax with the current maximum + * transfer parameters for this target. + * Exit if narrow and async. + */ + dv.cmd = MPT_GET_NVRAM_VALS; + mptscsih_dv_parms(hd, &dv, NULL); + if ((!dv.max.width) && (!dv.max.offset)) + return; + + /* Prep SCSI IO structure + */ + iocmd.id = id; + iocmd.bus = bus; + iocmd.lun = lun; + iocmd.flags = 0; + iocmd.physDiskNum = -1; + iocmd.rsvd = iocmd.rsvd2 = 0; + + /* Use tagged commands if possible. 
+ */ + pTarget = hd->Targets[id]; + if (pTarget && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) + iocmd.flags |= MPT_ICFLAG_TAGGED_CMD; + + /* Prep cfg structure + */ + cfg.pageAddr = (bus<<8) | id; + cfg.hdr = NULL; + + /* Prep SDP0 header + */ + header0.PageVersion = ioc->spi_data.sdp0version; + header0.PageLength = ioc->spi_data.sdp0length; + header0.PageNumber = 0; + header0.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pcfg0Data = (SCSIDevicePage0_t *)pci_alloc_consistent(ioc->pcidev, + header0.PageLength * 4, &cfg0_dma_addr); + if (!pcfg0Data) + return; + + /* Prep SDP1 header + */ + header1.PageVersion = ioc->spi_data.sdp1version; + header1.PageLength = ioc->spi_data.sdp1length; + header1.PageNumber = 1; + header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; + pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev, + header1.PageLength * 4, &cfg1_dma_addr); + if (!pcfg1Data) + goto target_done; + + /* Skip this ID? Set cfg.hdr to force config page write + */ + if ((ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID) && + (!(ioc->spi_data.nvram[id] & MPT_NVRAM_ID_SCAN_ENABLE))) { + + ddvprintk((MYIOC_s_NOTE_FMT "DV Skipped: bus, id, lun (%d, %d, %d)\n", + ioc->name, bus, id, lun)); + + dv.cmd = MPT_SET_MAX; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + cfg.hdr = &header1; + goto target_done; + } + + /* Finish iocmd inititialization - hidden or visible disk? 
*/ + if (ioc->spi_data.pIocPg3) { + /* Searc IOC page 3 for matching id + */ + Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; + int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; + + while (numPDisk) { + if (pPDisk->PhysDiskID == id) { + /* match */ + iocmd.flags |= MPT_ICFLAG_PHYS_DISK; + iocmd.physDiskNum = pPDisk->PhysDiskNum; + + /* Quiesce the IM + */ + if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0) { + ddvprintk((MYIOC_s_ERR_FMT "RAID Queisce FAILED!\n", ioc->name)); + goto target_done; + } + break; + } + pPDisk++; + numPDisk--; + } + } + + /* RAID Volume ID's may double for a physical device. If RAID but + * not a physical ID as well, skip DV. + */ + if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK)) + goto target_done; + + + /* Basic Test. + * Async & Narrow - Inquiry + * Async & Narrow - Inquiry + * Maximum transfer rate - Inquiry + * Compare buffers: + * If compare, test complete. + * If miscompare and first pass, repeat + * If miscompare and not first pass, fall back and repeat + */ + hd->pLocal = NULL; + readPage0 = 0; + sz = SCSI_STD_INQUIRY_BYTES; + pbuf1 = pci_alloc_consistent(ioc->pcidev, sz, &buf1_dma); + pbuf2 = pci_alloc_consistent(ioc->pcidev, sz, &buf2_dma); + if (!pbuf1 || !pbuf2) + goto target_done; + + while (1) { + ddvprintk((MYIOC_s_NOTE_FMT "DV: Start Basic test.\n", ioc->name)); + dv.cmd = MPT_SET_MIN; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + iocmd.cmd = CMD_Inquiry; + iocmd.data_dma = buf1_dma; + iocmd.data = pbuf1; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + /* Another GEM workaround. Check peripheral device type, + * if PROCESSOR, quit DV. 
+ */ + if (((pbuf1[0] & 0x1F) == 0x03) || ((pbuf1[0] & 0x1F) > 0x08)) + goto target_done; + + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + if (doFallback) + dv.cmd = MPT_FALLBACK; + else + dv.cmd = MPT_SET_MAX; + + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + + iocmd.cmd = CMD_Inquiry; + iocmd.data_dma = buf2_dma; + iocmd.data = pbuf2; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + /* Save the return code. + * If this is the first pass, + * read SCSI Device Page 0 + * and update the target max parameters. + */ + rc = hd->pLocal->completion; + doFallback = 0; + if (rc == MPT_SCANDV_GOOD) { + if (!readPage0) { + u32 sdp0_info; + u32 sdp0_nego; + + cfg.hdr = &header0; + cfg.physAddr = cfg0_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + cfg.dir = 0; + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + sdp0_info = le32_to_cpu(pcfg0Data->Information) & 0x0E; + sdp0_nego = (le32_to_cpu(pcfg0Data->NegotiatedParameters) & 0xFF00 ) >> 8; + + /* Quantum and Fujitsu workarounds. + * Quantum: PPR U320 -> PPR reply with Ultra2 and wide + * Fujitsu: PPR U320 -> Msg Reject and Ultra2 and wide + * Resetart with a request for U160. + */ + if ((dv.now.factor == MPT_ULTRA320) && (sdp0_nego == MPT_ULTRA2)) { + doFallback = 1; + } else { + dv.cmd = MPT_UPDATE_MAX; + mptscsih_dv_parms(hd, &dv, (void *)pcfg0Data); + /* Update the SCSI device page 1 area + */ + pcfg1Data->RequestedParameters = pcfg0Data->NegotiatedParameters; + readPage0 = 1; + } + } + + /* Quantum workaround. Restart this test will the fallback + * flag set. 
+ */ + if (doFallback == 0) { + if (memcmp(pbuf1, pbuf2, sz) != 0) { + if (!firstPass) + doFallback = 1; + } else + break; /* test complete */ + } + + + } else if ((rc == MPT_SCANDV_DID_RESET) || (rc == MPT_SCANDV_SENSE)) + doFallback = 1; /* set fallback flag */ + else + goto target_done; + + firstPass = 0; + } + } + /* Free pbuf2, but use pbuf1 for + * acquiring the (echo) buffer size. + */ + pci_free_consistent(ioc->pcidev, sz, pbuf2, buf2_dma); + pbuf2 = NULL; + ddvprintk((MYIOC_s_NOTE_FMT "DV: Basic test completed OK.\n", ioc->name)); + + /* Start the Enhanced Test. + * 0) issue TUR to clear out check conditions + * 1) read capacity of echo (regular) buffer + * 2) reserve device + * 3) do write-read-compare data pattern test + * 4) release + * 5) update nego parms to target struct + */ + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + + iocmd.cmd = CMD_TestUnitReady; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + notDone = 1; + while (notDone) { + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + if (hd->pLocal == NULL) + goto target_done; + + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) + notDone = 0; + else if (rc == MPT_SCANDV_SENSE) { + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + ioc->name, skey, asc, ascq)); + + if (skey == SK_UNIT_ATTENTION) + notDone++; /* repeat */ + else if ((skey == SK_NOT_READY) && + (asc == 0x04)&&(ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else if ((skey == SK_NOT_READY) && (asc == 0x3A)) { + /* no medium, try read test anyway */ + notDone = 0; + } else { + /* All other errors are fatal. 
+ */ + ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.", + ioc->name)); + goto target_done; + } + } else + goto target_done; + } + + iocmd.cmd = CMD_ReadBuffer; + iocmd.data_dma = buf1_dma; + iocmd.data = pbuf1; + iocmd.size = 4; + iocmd.flags |= MPT_ICFLAG_BUF_CAP; + + dataBufSize = 0; + echoBufSize = 0; + for (patt = 0; patt < 2; patt++) { + if (patt == 0) + iocmd.flags |= MPT_ICFLAG_ECHO; + else + iocmd.flags &= ~MPT_ICFLAG_ECHO; + + notDone = 1; + while (notDone) { + bufsize = 0; + + /* If not ready after 8 trials, + * give up on this device. + */ + if (notDone > 8) + goto target_done; + + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + ddvprintk(("ReadBuffer Comp Code %d", rc)); + ddvprintk((" buff: %0x %0x %0x %0x\n", + pbuf1[0], pbuf1[1], pbuf1[2], pbuf1[3])); + + if (rc == MPT_SCANDV_GOOD) { + notDone = 0; + if (iocmd.flags & MPT_ICFLAG_ECHO) { + bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3]; + } else { + bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3]; + } + } else if (rc == MPT_SCANDV_SENSE) { + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + ioc->name, skey, asc, ascq)); + if (skey == SK_ILLEGAL_REQUEST) { + notDone = 0; + } else if (skey == SK_UNIT_ATTENTION) { + notDone++; /* repeat */ + } else if ((skey == SK_NOT_READY) && + (asc == 0x04)&&(ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else { + /* All other errors are fatal. 
+ */ + ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.", + ioc->name)); + goto target_done; + } + } else { + /* All other errors are fatal + */ + goto target_done; + } + } + } + + if (iocmd.flags & MPT_ICFLAG_ECHO) + echoBufSize = bufsize; + else + dataBufSize = bufsize; + } + pci_free_consistent(ioc->pcidev, sz, pbuf1, buf1_dma); + pbuf1 = NULL; + sz = 0; + iocmd.flags &= ~MPT_ICFLAG_BUF_CAP; + + /* Use echo buffers if possible, + * Exit if both buffers are 0. + */ + if (echoBufSize > 0) { + iocmd.flags |= MPT_ICFLAG_ECHO; + if (dataBufSize > 0) + bufsize = MIN(echoBufSize, dataBufSize); + else + bufsize = echoBufSize; + } else if (dataBufSize == 0) + goto target_done; + + ddvprintk((MYIOC_s_INFO_FMT "%s Buffer Capacity %d\n", ioc->name, + (iocmd.flags & MPT_ICFLAG_ECHO) ? "Echo" : " ", bufsize)); + + /* Allocate data buffers for write-read-compare test. + */ + sz = MIN(bufsize, 1024); + pbuf1 = pci_alloc_consistent(ioc->pcidev, sz, &buf1_dma); + pbuf2 = pci_alloc_consistent(ioc->pcidev, sz, &buf2_dma); + if (!pbuf1 || !pbuf2) + goto target_done; + + /* --- loop ---- + * On first pass, always issue a reserve. + * On additional loops, only if a reset has occurred. 
+ * iocmd.flags indicates if echo or regular buffer + */ + for (patt = 0; patt < 4; patt++) { + ddvprintk(("Pattern %d\n", patt)); + if ((iocmd.flags & MPT_ICFLAG_RESERVED) && (iocmd.flags & MPT_ICFLAG_DID_RESET)) { + iocmd.cmd = CMD_TestUnitReady; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + + iocmd.cmd = CMD_Release6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + ddvprintk(("Release rc %d\n", rc)); + if (rc == MPT_SCANDV_GOOD) + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + else + goto target_done; + } + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + } + iocmd.flags &= ~MPT_ICFLAG_DID_RESET; + + repeat = 5; + while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) { + iocmd.cmd = CMD_Reserve6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) { + iocmd.flags |= MPT_ICFLAG_RESERVED; + } else if (rc == MPT_SCANDV_SENSE) { + /* Wait if coming ready + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + u8 asc = hd->pLocal->sense[12]; + u8 ascq = hd->pLocal->sense[13]; + ddvprintk((MYIOC_s_INFO_FMT + "DV: Reserve Failed: ", ioc->name)); + ddvprintk(("SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", + skey, asc, ascq)); + + if ((skey == SK_NOT_READY) && (asc == 0x04)&& + (ascq == 0x01)) { + /* wait then repeat */ + mdelay (2000); + notDone++; + } else { + ddvprintk((MYIOC_s_INFO_FMT + "DV: Reserved Failed.", ioc->name)); + goto target_done; + } + } else { + ddvprintk((MYIOC_s_INFO_FMT "DV: Reserved Failed.", + ioc->name)); + goto target_done; + } + } + } + + mptscsih_fillbuf(pbuf1, sz, patt, 1); + iocmd.cmd = CMD_WriteBuffer; + iocmd.data_dma = buf1_dma; + iocmd.data = 
pbuf1; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) + ; /* Issue read buffer */ + else if (rc == MPT_SCANDV_DID_RESET) { + /* If using echo buffers, reset to data buffers. + * Else do Fallback and restart + * this test (re-issue reserve + * because of bus reset). + */ + if ((iocmd.flags & MPT_ICFLAG_ECHO) && (dataBufSize >= bufsize)) { + iocmd.flags &= ~MPT_ICFLAG_ECHO; + } else { + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + } + + iocmd.flags |= MPT_ICFLAG_DID_RESET; + patt = -1; + continue; + } else if (rc == MPT_SCANDV_SENSE) { + /* Restart data test if UA, else quit. + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey, + hd->pLocal->sense[12], hd->pLocal->sense[13])); + if (skey == SK_UNIT_ATTENTION) { + patt = -1; + continue; + } else if (skey == SK_ILLEGAL_REQUEST) { + if (iocmd.flags & MPT_ICFLAG_ECHO) { + if (dataBufSize >= bufsize) { + iocmd.flags &= ~MPT_ICFLAG_ECHO; + patt = -1; + continue; + } + } + goto target_done; + } + else + goto target_done; + } else { + /* fatal error */ + goto target_done; + } + } + + iocmd.cmd = CMD_ReadBuffer; + iocmd.data_dma = buf2_dma; + iocmd.data = pbuf2; + iocmd.size = sz; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + goto target_done; + else if (hd->pLocal == NULL) + goto target_done; + else { + rc = hd->pLocal->completion; + if (rc == MPT_SCANDV_GOOD) { + /* If buffers compare, + * go to next pattern, + * else, do a fallback and restart + * data transfer test. + */ + if (memcmp (pbuf1, pbuf2, sz) == 0) { + ; /* goto next pattern */ + } else { + /* Miscompare with Echo buffer, go to data buffer, + * if that buffer exists. 
+ * Miscompare with Data buffer, check first 4 bytes, + * some devices return capacity. Exit in this case. + */ + if (iocmd.flags & MPT_ICFLAG_ECHO) { + if (dataBufSize >= bufsize) + iocmd.flags &= ~MPT_ICFLAG_ECHO; + else + goto target_done; + } else { + if (dataBufSize == (pbuf2[1]<<16 | pbuf2[2]<<8 | pbuf2[3])) { + /* Argh. Device returning wrong data. + * Quit DV for this device. + */ + goto target_done; + } + + /* Had an actual miscompare. Slow down.*/ + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + } + + patt = -1; + continue; + } + } else if (rc == MPT_SCANDV_DID_RESET) { + /* Do Fallback and restart + * this test (re-issue reserve + * because of bus reset). + */ + dv.cmd = MPT_FALLBACK; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + if (mpt_config(hd->ioc, &cfg) != 0) + goto target_done; + + if ((!dv.now.width) && (!dv.now.offset)) + goto target_done; + + iocmd.flags |= MPT_ICFLAG_DID_RESET; + patt = -1; + continue; + } else if (rc == MPT_SCANDV_SENSE) { + /* Restart data test if UA, else quit. + */ + u8 skey = hd->pLocal->sense[2] & 0x0F; + ddvprintk((MYIOC_s_INFO_FMT + "SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey, + hd->pLocal->sense[12], hd->pLocal->sense[13])); + if (skey == SK_UNIT_ATTENTION) { + patt = -1; + continue; + } + else + goto target_done; + } else { + /* fatal error */ + goto target_done; + } + } + + } /* --- end of patt loop ---- */ + +target_done: + if (iocmd.flags & MPT_ICFLAG_RESERVED) { + iocmd.cmd = CMD_Release6; + iocmd.data_dma = -1; + iocmd.data = NULL; + iocmd.size = 0; + if (mptscsih_do_cmd(hd, &iocmd) < 0) + printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d", + ioc->name, id); + else if (hd->pLocal) { + if (hd->pLocal->completion == MPT_SCANDV_GOOD) + iocmd.flags &= ~MPT_ICFLAG_RESERVED; + } else { + printk(MYIOC_s_INFO_FMT "DV: Release failed. 
id %d", + ioc->name, id); + } + } + + + /* Set if cfg1_dma_addr contents is valid + */ + if (cfg.hdr != NULL) { + dv.cmd = MPT_SAVE; + mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); + + /* Save the final negotiated settings to + * SCSI device page 1. + */ + cfg.hdr = &header1; + cfg.physAddr = cfg1_dma_addr; + cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + cfg.dir = 1; + mpt_config(hd->ioc, &cfg); + } + + /* If this is a RAID Passthrough, enable internal IOs + */ + if (iocmd.flags & MPT_ICFLAG_PHYS_DISK) { + if (mptscsih_do_raid(hd, MPI_RAID_ACTION_ENABLE_PHYS_IO, &iocmd) < 0) + ddvprintk((MYIOC_s_ERR_FMT "RAID Queisce FAILED!\n", ioc->name)); + } + + /* Done with the DV scan of the current target + */ + if (pcfg0Data) { + pci_free_consistent(ioc->pcidev, header0.PageLength * 4, + pcfg0Data, cfg0_dma_addr); + } + + if (pcfg1Data) { + pci_free_consistent(ioc->pcidev, header1.PageLength * 4, + pcfg1Data, cfg1_dma_addr); + } + + if (pbuf1) { + pci_free_consistent(ioc->pcidev, sz, pbuf1, buf1_dma); + pbuf1 = NULL; + } + + if (pbuf2) { + pci_free_consistent(ioc->pcidev, sz, pbuf2, buf2_dma); + pbuf2 = NULL; + } + + ddvtprintk((MYIOC_s_INFO_FMT "DV Done. IOs outstanding = %d\n", + ioc->name, atomic_read(&queue_depth))); + + return; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_dv_parms - perform a variety of operations on the + * parameters used for negotiation. + * @hd: Pointer to a SCSI host. + * @dv: Pointer to a structure that contains the maximum and current + * negotiated parameters. 
+ */ +static void +mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage) +{ + VirtDevice *pTarget = NULL; + SCSIDevicePage0_t *pPage0 = NULL; + SCSIDevicePage1_t *pPage1 = NULL; + int val = 0, data, configuration; + u8 width = 0; + u8 offset = 0; + u8 factor = 0; + u8 negoFlags = 0; + u8 cmd = dv->cmd; + u8 id = dv->id; + + switch (cmd) { + case MPT_GET_NVRAM_VALS: + ddvprintk((MYIOC_s_NOTE_FMT "Getting NVRAM: ", + hd->ioc->name)); + /* Get the NVRAM values and save in tmax + * If not an LVD bus, the adapter minSyncFactor has been + * already throttled back. + */ + if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { + width = pTarget->maxWidth; + offset = pTarget->maxOffset; + factor = pTarget->minSyncFactor; + negoFlags = pTarget->negoFlags; + } else { + if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { + data = hd->ioc->spi_data.nvram[id]; + width = data & MPT_NVRAM_WIDE_DISABLE ? 0 : 1; + if ((offset = hd->ioc->spi_data.maxSyncOffset) == 0) + factor = MPT_ASYNC; + else { + factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT; + if ((factor == 0) || (factor == MPT_ASYNC)){ + factor = MPT_ASYNC; + offset = 0; + } + } + } else { + width = MPT_NARROW; + offset = 0; + factor = MPT_ASYNC; + } + + /* Set the negotiation flags */ + negoFlags = 0; + if (!width) + negoFlags |= MPT_TARGET_NO_NEGO_WIDE; + + if (!offset) + negoFlags |= MPT_TARGET_NO_NEGO_SYNC; + } + + /* limit by adapter capabilities */ + width = MIN(width, hd->ioc->spi_data.maxBusWidth); + offset = MIN(offset, hd->ioc->spi_data.maxSyncOffset); + factor = MAX(factor, hd->ioc->spi_data.minSyncFactor); + + /* Check Consistency */ + if (offset && (factor < MPT_ULTRA2) && !width) + factor = MPT_ULTRA2; + + dv->max.width = width; + dv->max.offset = offset; + dv->max.factor = factor; + dv->max.flags = negoFlags; + ddvprintk((" width %d, factor %x, offset %x flags %x\n", + width, factor, offset, negoFlags)); + break; 
+ + case MPT_UPDATE_MAX: + ddvprintk((MYIOC_s_NOTE_FMT + "Updating with SDP0 Data: ", hd->ioc->name)); + /* Update tmax values with those from Device Page 0.*/ + pPage0 = (SCSIDevicePage0_t *) pPage; + if (pPage0) { + val = cpu_to_le32(pPage0->NegotiatedParameters); + dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0; + dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16; + dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; + } + + dv->now.width = dv->max.width; + dv->now.offset = dv->max.offset; + dv->now.factor = dv->max.factor; + ddvprintk(("width %d, factor %x, offset %x, flags %x\n", + dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags)); + break; + + case MPT_SET_MAX: + ddvprintk((MYIOC_s_NOTE_FMT "Setting Max: ", + hd->ioc->name)); + /* Set current to the max values. Update the config page.*/ + dv->now.width = dv->max.width; + dv->now.offset = dv->max.offset; + dv->now.factor = dv->max.factor; + dv->now.flags = dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (dv->now.width, dv->now.factor, + dv->now.offset, &val, &configuration, dv->now.flags); + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + + } + + ddvprintk(("width %d, factor %x, offset %x request %x, config %x\n", + dv->now.width, dv->now.factor, dv->now.offset, val, configuration)); + break; + + case MPT_SET_MIN: + ddvprintk((MYIOC_s_NOTE_FMT "Setting Min: ", + hd->ioc->name)); + /* Set page to asynchronous and narrow + * Do not update now, breaks fallback routine. 
*/ + width = MPT_NARROW; + offset = 0; + factor = MPT_ASYNC; + negoFlags = dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (width, factor, + offset, &val, &configuration, negoFlags); + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + } + ddvprintk(("width %d, factor %x, offset %x request %x config %x\n", + dv->now.width, dv->now.factor, + dv->now.offset, val, configuration)); + break; + + case MPT_FALLBACK: + ddvprintk((MYIOC_s_NOTE_FMT + "Fallback: Start: offset %d, factor %x, width %d \n", + hd->ioc->name, dv->now.offset, + dv->now.factor, dv->now.width)); + width = dv->now.width; + offset = dv->now.offset; + factor = dv->now.factor; + if ((offset) && (dv->max.width)) { + if (factor < MPT_ULTRA160) + factor = MPT_ULTRA160; + else if (factor < MPT_ULTRA2) { + factor = MPT_ULTRA2; + width = MPT_WIDE; + } else if ((factor == MPT_ULTRA2) && width) { + factor = MPT_ULTRA2; + width = MPT_NARROW; + } else if (factor < MPT_ULTRA) { + factor = MPT_ULTRA; + width = MPT_WIDE; + } else if ((factor == MPT_ULTRA) && width) { + factor = MPT_ULTRA; + width = MPT_NARROW; + } else if (factor < MPT_FAST) { + factor = MPT_FAST; + width = MPT_WIDE; + } else if ((factor == MPT_FAST) && width) { + factor = MPT_FAST; + width = MPT_NARROW; + } else if (factor < MPT_SCSI) { + factor = MPT_SCSI; + width = MPT_WIDE; + } else if ((factor == MPT_SCSI) && width) { + factor = MPT_SCSI; + width = MPT_NARROW; + } else { + factor = MPT_ASYNC; + offset = 0; + } + + } else if (offset) { + width = MPT_NARROW; + if (factor < MPT_ULTRA) + factor = MPT_ULTRA; + else if (factor < MPT_FAST) + factor = MPT_FAST; + else if (factor < MPT_SCSI) + factor = MPT_SCSI; + else { + factor = MPT_ASYNC; + offset = 0; + } + + } else { + width = MPT_NARROW; + factor = MPT_ASYNC; + } + + dv->now.width = width; + dv->now.offset = offset; + dv->now.factor = factor; + dv->now.flags = 
dv->max.flags; + + pPage1 = (SCSIDevicePage1_t *)pPage; + if (pPage1) { + mptscsih_setDevicePage1Flags (width, factor, offset, &val, + &configuration, dv->now.flags); + + pPage1->RequestedParameters = le32_to_cpu(val); + pPage1->Reserved = 0; + pPage1->Configuration = le32_to_cpu(configuration); + } + + ddvprintk(("Finish: offset %d, factor %x, width %d, request %x config %x\n", + dv->now.offset, dv->now.factor, dv->now.width, val, configuration)); + break; + + case MPT_SAVE: + ddvprintk((MYIOC_s_NOTE_FMT + "Saving to Target structure: ", hd->ioc->name)); + ddvprintk(("offset %d, factor %x, width %d \n", + dv->now.offset, dv->now.factor, dv->now.width)); + + /* Save these values to target structures + * or overwrite nvram (phys disks only). + */ + + if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) { + pTarget->maxWidth = dv->now.width; + pTarget->maxOffset = dv->now.offset; + pTarget->minSyncFactor = dv->now.factor; + } else { + /* Preserv all flags, use + * read-modify-write algorithm + */ + data = hd->ioc->spi_data.nvram[id]; + + if (dv->now.width) + data &= ~MPT_NVRAM_WIDE_DISABLE; + else + data |= MPT_NVRAM_WIDE_DISABLE; + + if (!dv->now.offset) + factor = MPT_ASYNC; + + data &= ~MPT_NVRAM_SYNC_MASK; + data |= (dv->now.factor << MPT_NVRAM_SYNC_SHIFT) & MPT_NVRAM_SYNC_MASK; + + hd->ioc->spi_data.nvram[id] = data; + } + break; + } +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* mptscsih_fillbuf - fill a buffer with a special data pattern + * cleanup. For bus scan only. + * + * @buffer: Pointer to data buffer to be filled. 
+ * @size: Number of bytes to fill + * @index: Pattern index + * @width: bus width, 0 (8 bits) or 1 (16 bits) + */ +static void +mptscsih_fillbuf(char *buffer, int size, int index, int width) +{ + char *ptr = buffer; + int ii; + char byte; + short val; + + switch (index) { + case 0: + + if (width) { + /* Pattern: 0000 FFFF 0000 FFFF + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x02) + *ptr = 0xFF; + else + *ptr = 0x00; + } + } else { + /* Pattern: 00 FF 00 FF + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x01) + *ptr = 0xFF; + else + *ptr = 0x00; + } + } + break; + + case 1: + if (width) { + /* Pattern: 5555 AAAA 5555 AAAA 5555 + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x02) + *ptr = 0xAA; + else + *ptr = 0x55; + } + } else { + /* Pattern: 55 AA 55 AA 55 + */ + for (ii=0; ii < size; ii++, ptr++) { + if (ii & 0x01) + *ptr = 0xAA; + else + *ptr = 0x55; + } + } + break; + + case 2: + /* Pattern: 00 01 02 03 04 05 + * ... FE FF 00 01.. + */ + for (ii=0; ii < size; ii++, ptr++) + *ptr = (char) ii; + break; + + case 3: + if (width) { + /* Wide Pattern: FFFE 0001 FFFD 0002 + * ... 4000 DFFF 8000 EFFF + */ + byte = 0; + for (ii=0; ii < size/2; ii++) { + /* Create the base pattern + */ + val = (1 << byte); + /* every 64 (0x40) bytes flip the pattern + * since we fill 2 bytes / iteration, + * test for ii = 0x20 + */ + if (ii & 0x20) + val = ~(val); + + if (ii & 0x01) { + *ptr = (char)( (val & 0xFF00) >> 8); + ptr++; + *ptr = (char)(val & 0xFF); + byte++; + byte &= 0x0F; + } else { + val = ~val; + *ptr = (char)( (val & 0xFF00) >> 8); + ptr++; + *ptr = (char)(val & 0xFF); + } + + ptr++; + } + } else { + /* Narrow Pattern: FE 01 FD 02 FB 04 + * .. 7F 80 01 FE 02 FD ... 
80 7F + */ + byte = 0; + for (ii=0; ii < size; ii++, ptr++) { + /* Base pattern - first 32 bytes + */ + if (ii & 0x01) { + *ptr = (1 << byte); + byte++; + byte &= 0x07; + } else { + *ptr = (char) (~(1 << byte)); + } + + /* Flip the pattern every 32 bytes + */ + if (ii & 0x20) + *ptr = ~(*ptr); + } + } + break; + } +} +#endif /* ~MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* Commandline Parsing routines and defines. + * + * insmod format: + * insmod mptscsih mptscsih="width:1 dv:n factor:0x09" + * boot format: + * mptscsih=width:1,dv:n,factor:0x8 + * + */ +#ifdef MODULE +#define ARG_SEP ' ' +#else +#define ARG_SEP ',' +#endif + +static char setup_token[] __initdata = + "dv:" + "width:" + "factor:" + ; /* DO NOT REMOVE THIS ';' */ + +#define OPT_DV 1 +#define OPT_MAX_WIDTH 2 +#define OPT_MIN_SYNC_FACTOR 3 + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +__init get_setup_token(char *p) +{ + char *cur = setup_token; + char *pc; + int i = 0; + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + ++pc; + ++i; + if (!strncmp(p, cur, pc - cur)) + return i; + cur = pc; + } + return 0; +} + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static int +__init mptscsih_setup(char *str) +{ + char *cur = str; + char *pc, *pv; + unsigned long val; + int c; + + printk(KERN_WARNING "mptscsih_setup arg %s\n", str); + + while (cur != NULL && (pc = strchr(cur, ':')) != NULL) { + char *pe; + + val = 0; + pv = pc; + c = *++pv; + + if (c == 'n') + val = 0; + else if (c == 'y') + val = 1; + else + val = (int) simple_strtoul(pv, &pe, 0); + + printk("Found Token: %s, value %x\n", cur, (int)val); + switch (get_setup_token(cur)) { + case OPT_DV: + driver_setup.dv = val; + break; + + case OPT_MAX_WIDTH: + driver_setup.max_width = val; + break; + + case OPT_MIN_SYNC_FACTOR: + driver_setup.min_sync_fac = val; + break; + + default: + printk("mptscsih_setup: unexpected boot option '%.*s' ignored\n", 
(int)(pc-cur+1), cur); + break; + } + + if ((cur = strchr(cur, ARG_SEP)) != NULL) + ++cur; + } + return 1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff -Nru a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h --- a/drivers/message/fusion/mptscsih.h Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/mptscsih.h Thu May 30 21:28:59 2002 @@ -15,11 +15,12 @@ * * (see also mptbase.c) * - * Copyright (c) 1999-2001 LSI Logic Corporation + * Copyright (c) 1999-2002 LSI Logic Corporation * Originally By: Steven J. Ralston - * (mailto:Steve.Ralston@lsil.com) + * (mailto:netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: mptscsih.h,v 1.7 2001/01/11 16:56:43 sralston Exp $ + * $Id: mptscsih.h,v 1.16 2002/02/27 18:44:30 sralston Exp $ */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -68,14 +69,47 @@ * SCSI Public stuff... */ -#ifdef __sparc__ -#define MPT_SCSI_CAN_QUEUE 63 -#define MPT_SCSI_CMD_PER_LUN 63 - /* FIXME! Still investigating qd=64 hang on sparc64... */ -#else -#define MPT_SCSI_CAN_QUEUE 64 -#define MPT_SCSI_CMD_PER_LUN 64 -#endif +/* + * Try to keep these at 2^N-1 + */ +#define MPT_FC_CAN_QUEUE 63 +#define MPT_SCSI_CAN_QUEUE 31 +#define MPT_SCSI_CMD_PER_LUN 7 + +#define MPT_SCSI_SG_DEPTH 40 + +/* To disable domain validation, uncomment the + * following line. No effect for FC devices. + * For SCSI devices, driver will negotiate to + * NVRAM settings (if available) or to maximum adapter + * capabilities. + */ +/* #define MPTSCSIH_DISABLE_DOMAIN_VALIDATION */ + + +/* SCSI driver setup structure. Settings can be overridden + * by command line options. 
+ */ +#define MPTSCSIH_DOMAIN_VALIDATION 1 +#define MPTSCSIH_MAX_WIDTH 1 +#define MPTSCSIH_MIN_SYNC 0x08 + +struct mptscsih_driver_setup +{ + u8 dv; + u8 max_width; + u8 min_sync_fac; +}; + + +#define MPTSCSIH_DRIVER_SETUP \ +{ \ + MPTSCSIH_DOMAIN_VALIDATION, \ + MPTSCSIH_MAX_WIDTH, \ + MPTSCSIH_MIN_SYNC, \ +} + + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -143,6 +177,7 @@ #define x_scsi_dev_reset mptscsih_dev_reset #define x_scsi_host_reset mptscsih_host_reset #define x_scsi_bios_param mptscsih_bios_param +#define x_scsi_select_queue_depths mptscsih_select_queue_depths #define x_scsi_taskmgmt_bh mptscsih_taskmgmt_bh #define x_scsi_old_abort mptscsih_old_abort @@ -155,7 +190,6 @@ extern int x_scsi_detect(Scsi_Host_Template *); extern int x_scsi_release(struct Scsi_Host *host); extern const char *x_scsi_info(struct Scsi_Host *); -/*extern int x_scsi_command(Scsi_Cmnd *);*/ extern int x_scsi_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); #ifdef MPT_SCSI_USE_NEW_EH extern int x_scsi_abort(Scsi_Cmnd *); @@ -167,6 +201,7 @@ extern int x_scsi_old_reset(Scsi_Cmnd *, unsigned int); #endif extern int x_scsi_bios_param(Disk *, kdev_t, int *); +extern void x_scsi_select_queue_depths(struct Scsi_Host *, Scsi_Device *); extern void x_scsi_taskmgmt_bh(void *); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) @@ -194,10 +229,11 @@ bios_param: x_scsi_bios_param, \ can_queue: MPT_SCSI_CAN_QUEUE, \ this_id: -1, \ - sg_tablesize: 25, \ + sg_tablesize: MPT_SCSI_SG_DEPTH, \ cmd_per_lun: MPT_SCSI_CMD_PER_LUN, \ unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING, \ + use_new_eh_code: 1 \ } #else @@ -216,7 +252,7 @@ bios_param: x_scsi_bios_param, \ can_queue: MPT_SCSI_CAN_QUEUE, \ this_id: -1, \ - sg_tablesize: 25, \ + sg_tablesize: MPT_SCSI_SG_DEPTH, \ cmd_per_lun: MPT_SCSI_CMD_PER_LUN, \ unchecked_isa_dma: 0, \ use_clustering: ENABLE_CLUSTERING \ diff -Nru a/drivers/message/fusion/scsi3.h b/drivers/message/fusion/scsi3.h --- 
a/drivers/message/fusion/scsi3.h Thu May 30 21:28:59 2002 +++ b/drivers/message/fusion/scsi3.h Thu May 30 21:28:59 2002 @@ -4,11 +4,12 @@ * (Ultimately) SCSI-3 definitions; for now, inheriting * SCSI-2 definitions. * - * Copyright (c) 1996-2001 Steven J. Ralston + * Copyright (c) 1996-2002 Steven J. Ralston * Written By: Steven J. Ralston (19960517) - * (mailto:Steve.Ralston@lsil.com) + * (mailto:sjralston1@netscape.net) + * (mailto:Pam.Delaney@lsil.com) * - * $Id: scsi3.h,v 1.5 2001/04/06 14:31:32 sralston Exp $ + * $Id: scsi3.h,v 1.9 2002/02/27 18:45:02 sralston Exp $ */ #ifndef SCSI3_H_INCLUDED @@ -63,7 +64,10 @@ #define CMD_Write10 0x2A #define CMD_WriteVerify 0x2E #define CMD_Verify 0x2F +#define CMD_SynchronizeCache 0x35 #define CMD_ReadDefectData 0x37 +#define CMD_WriteBuffer 0x3B +#define CMD_ReadBuffer 0x3C #define CMD_ReadLong 0x3E #define CMD_LogSelect 0x4C #define CMD_LogSense 0x4D diff -Nru a/drivers/net/eepro100.c b/drivers/net/eepro100.c --- a/drivers/net/eepro100.c Thu May 30 21:28:59 2002 +++ b/drivers/net/eepro100.c Thu May 30 21:28:59 2002 @@ -25,6 +25,8 @@ Disabled FC and ER, to avoid lockups when when we get FCP interrupts. 2000 Jul 17 Goutham Rao PCI DMA API fixes, adding pci_dma_sync_single calls where neccesary + 2000 Aug 31 David Mosberger + RX_ALIGN support: enables rx DMA without causing unaligned accesses. */ static const char *version = @@ -41,14 +43,18 @@ static int txdmacount = 128; static int rxdmacount /* = 0 */; +#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \ + defined(__arm__) + /* align rx buffers to 2 bytes so that IP header is aligned */ +# define RX_ALIGN +# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed)) +#else +# define RxFD_ALIGNMENT +#endif + /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method. Lower values use more memory, but are faster. 
*/ -#if defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \ - defined(__arm__) -static int rx_copybreak = 1518; -#else static int rx_copybreak = 200; -#endif /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 20; @@ -377,18 +383,18 @@ /* The Speedo3 Rx and Tx frame/buffer descriptors. */ struct descriptor { /* A generic descriptor. */ - s32 cmd_status; /* All command and status fields. */ + volatile s32 cmd_status; /* All command and status fields. */ u32 link; /* struct descriptor * */ unsigned char params[0]; }; /* The Speedo3 Rx and Tx buffer descriptors. */ struct RxFD { /* Receive frame descriptor. */ - s32 status; + volatile s32 status; u32 link; /* struct RxFD * */ u32 rx_buf_addr; /* void * */ u32 count; -}; +} RxFD_ALIGNMENT; /* Selected elements of the Tx/RxFD.status word. */ enum RxFD_bits { @@ -523,7 +529,9 @@ static int eepro100_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +#if defined(MODULE) || defined(CONFIG_HOTPLUG) static void eepro100_remove_one (struct pci_dev *pdev); +#endif #ifdef CONFIG_PM static int eepro100_suspend (struct pci_dev *pdev, u32 state); static int eepro100_resume (struct pci_dev *pdev); @@ -1233,6 +1241,9 @@ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); +#ifdef RX_ALIGN + skb_reserve(skb, 2); /* Align IP on 16 byte boundary */ +#endif sp->rx_skbuff[i] = skb; if (skb == NULL) break; /* OK. Just initially short of Rx bufs. */ @@ -1624,6 +1635,9 @@ struct sk_buff *skb; /* Get a fresh skbuff to replace the consumed one. 
*/ skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); +#ifdef RX_ALIGN + skb_reserve(skb, 2); /* Align IP on 16 byte boundary */ +#endif sp->rx_skbuff[entry] = skb; if (skb == NULL) { sp->rx_ringp[entry] = NULL; @@ -2307,7 +2321,9 @@ name: "eepro100", id_table: eepro100_pci_tbl, probe: eepro100_init_one, +# if defined(MODULE) || defined(CONFIG_HOTPLUG) remove: __devexit_p(eepro100_remove_one), +# endif #ifdef CONFIG_PM suspend: eepro100_suspend, resume: eepro100_resume, diff -Nru a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c --- a/drivers/net/tulip/media.c Thu May 30 21:28:59 2002 +++ b/drivers/net/tulip/media.c Thu May 30 21:28:59 2002 @@ -278,6 +278,10 @@ for (i = 0; i < init_length; i++) outl(init_sequence[i], ioaddr + CSR12); } + + (void) inl(ioaddr + CSR6); /* flush CSR12 writes */ + udelay(500); /* Give MII time to recover */ + tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; diff -Nru a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c --- a/drivers/scsi/megaraid.c Thu May 30 21:28:58 2002 +++ b/drivers/scsi/megaraid.c Thu May 30 21:28:58 2002 @@ -2036,9 +2036,6 @@ #if DEBUG -static unsigned int cum_time = 0; -static unsigned int cum_time_cnt = 0; - static void showMbox (mega_scb * pScb) { mega_mailbox *mbox; @@ -2047,7 +2044,7 @@ return; mbox = (mega_mailbox *) pScb->mboxData; - printk ("%u cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n", + printk ("%lu cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n", pScb->SCpnt->pid, mbox->cmd, mbox->cmdid, mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, mbox->numsgelements); @@ -2292,10 +2289,6 @@ phys_mbox = virt_to_bus (megaCfg->mbox); #endif -#if DEBUG - ShowMbox (pScb); -#endif - /* Wait until mailbox is free */ if (mega_busyWaitMbox (megaCfg)) { printk ("Blocked mailbox......!!\n"); @@ -3356,9 +3349,13 @@ mbox[0] = IS_BIOS_ENABLED; mbox[2] = GET_BIOS; - mboxpnt->xferaddr = virt_to_bus ((void *) megacfg->mega_buffer); + 
mboxpnt->xferaddr = pci_map_single(megacfg->dev, + (void *) megacfg->mega_buffer, (2 * 1024L), + PCI_DMA_FROMDEVICE); ret = megaIssueCmd (megacfg, mbox, NULL, 0); + + pci_unmap_single(megacfg->dev, mboxpnt->xferaddr, 2 * 1024L, PCI_DMA_FROMDEVICE); return (*(char *) megacfg->mega_buffer); } diff -Nru a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c --- a/drivers/scsi/scsi_ioctl.c Thu May 30 21:28:59 2002 +++ b/drivers/scsi/scsi_ioctl.c Thu May 30 21:28:59 2002 @@ -196,6 +196,9 @@ unsigned int needed, buf_needed; int timeout, retries, result; int data_direction, gfp_mask = GFP_KERNEL; +#if __GNUC__ < 3 + int foo; +#endif if (!sic) return -EINVAL; @@ -209,11 +212,21 @@ if (verify_area(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command))) return -EFAULT; +#if __GNUC__ < 3 + foo = __get_user(inlen, &sic->inlen); + if (foo) + return -EFAULT; + + foo = __get_user(outlen, &sic->outlen); + if (foo) + return -EFAULT; +#else if(__get_user(inlen, &sic->inlen)) return -EFAULT; if(__get_user(outlen, &sic->outlen)) return -EFAULT; +#endif /* * We do not transfer more than MAX_BUF with this interface. diff -Nru a/fs/fcntl.c b/fs/fcntl.c --- a/fs/fcntl.c Thu May 30 21:28:58 2002 +++ b/fs/fcntl.c Thu May 30 21:28:58 2002 @@ -315,6 +315,7 @@ * to fix this will be in libc. 
*/ err = filp->f_owner.pid; + force_successful_syscall_return(); break; case F_SETOWN: lock_kernel(); diff -Nru a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c --- a/fs/nfsd/nfsctl.c Thu May 30 21:28:58 2002 +++ b/fs/nfsd/nfsctl.c Thu May 30 21:28:58 2002 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff -Nru a/fs/proc/base.c b/fs/proc/base.c --- a/fs/proc/base.c Thu May 30 21:28:58 2002 +++ b/fs/proc/base.c Thu May 30 21:28:58 2002 @@ -515,7 +515,24 @@ } #endif +static loff_t mem_lseek(struct file * file, loff_t offset, int orig) +{ + switch (orig) { + case 0: + file->f_pos = offset; + break; + case 1: + file->f_pos += offset; + break; + default: + return -EINVAL; + } + force_successful_syscall_return(); + return file->f_pos; +} + static struct file_operations proc_mem_operations = { + llseek: mem_lseek, read: mem_read, write: mem_write, open: mem_open, diff -Nru a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h --- a/include/asm-i386/hw_irq.h Thu May 30 21:28:59 2002 +++ b/include/asm-i386/hw_irq.h Thu May 30 21:28:59 2002 @@ -108,4 +108,6 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif +extern irq_desc_t irq_desc [NR_IRQS]; + #endif /* _ASM_HW_IRQ_H */ diff -Nru a/include/asm-i386/page.h b/include/asm-i386/page.h --- a/include/asm-i386/page.h Thu May 30 21:28:58 2002 +++ b/include/asm-i386/page.h Thu May 30 21:28:58 2002 @@ -30,8 +30,8 @@ #endif -#define clear_user_page(page, vaddr) clear_page(page) -#define copy_user_page(to, from, vaddr) copy_page(to, from) +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) /* * These are used to make use of C type-checking.. 
diff -Nru a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h --- a/include/asm-i386/ptrace.h Thu May 30 21:28:58 2002 +++ b/include/asm-i386/ptrace.h Thu May 30 21:28:58 2002 @@ -58,6 +58,7 @@ #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) #define instruction_pointer(regs) ((regs)->eip) extern void show_regs(struct pt_regs *); +#define force_successful_syscall_return() do { } while (0) #endif #endif diff -Nru a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h --- a/include/asm-ia64/bitops.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/bitops.h Thu May 30 21:28:59 2002 @@ -326,6 +326,12 @@ return exp - 0xffff; } +static int +fls (int x) +{ + return ia64_fls((unsigned int) x); +} + /* * ffs: find first bit set. This is defined the same way as the libc and compiler builtin * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on diff -Nru a/include/asm-ia64/efi.h b/include/asm-ia64/efi.h --- a/include/asm-ia64/efi.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/efi.h Thu May 30 21:28:59 2002 @@ -7,9 +7,9 @@ * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999 Hewlett-Packard Co. - * Copyright (C) 1999 David Mosberger-Tang - * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 1999, 2002 Hewlett-Packard Co. 
+ * David Mosberger-Tang + * Stephane Eranian */ #include #include @@ -258,8 +258,9 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timeval *tv); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ -extern u64 efi_get_iobase (void); -extern u32 efi_mem_type (u64 phys_addr); +extern u64 efi_get_iobase (void); +extern u32 efi_mem_type (unsigned long phys_addr); +extern u64 efi_mem_attributes (unsigned long phys_addr); /* * Variable Attributes diff -Nru a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h --- a/include/asm-ia64/elf.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/elf.h Thu May 30 21:28:59 2002 @@ -38,7 +38,7 @@ * the way of the program that it will "exec", and that there is * sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000) /* diff -Nru a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h --- a/include/asm-ia64/kregs.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/kregs.h Thu May 30 21:28:58 2002 @@ -2,8 +2,8 @@ #define _ASM_IA64_KREGS_H /* - * Copyright (C) 2001 Hewlett-Packard Co - * Copyright (C) 2001 David Mosberger-Tang + * Copyright (C) 2001-2002 Hewlett-Packard Co + * David Mosberger-Tang */ /* * This file defines the kernel register usage convention used by Linux/ia64. 
@@ -30,5 +30,122 @@ #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ #define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */ #define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */ + +/* Processor status register bits: */ +#define IA64_PSR_BE_BIT 1 +#define IA64_PSR_UP_BIT 2 +#define IA64_PSR_AC_BIT 3 +#define IA64_PSR_MFL_BIT 4 +#define IA64_PSR_MFH_BIT 5 +#define IA64_PSR_IC_BIT 13 +#define IA64_PSR_I_BIT 14 +#define IA64_PSR_PK_BIT 15 +#define IA64_PSR_DT_BIT 17 +#define IA64_PSR_DFL_BIT 18 +#define IA64_PSR_DFH_BIT 19 +#define IA64_PSR_SP_BIT 20 +#define IA64_PSR_PP_BIT 21 +#define IA64_PSR_DI_BIT 22 +#define IA64_PSR_SI_BIT 23 +#define IA64_PSR_DB_BIT 24 +#define IA64_PSR_LP_BIT 25 +#define IA64_PSR_TB_BIT 26 +#define IA64_PSR_RT_BIT 27 +/* The following are not affected by save_flags()/restore_flags(): */ +#define IA64_PSR_CPL0_BIT 32 +#define IA64_PSR_CPL1_BIT 33 +#define IA64_PSR_IS_BIT 34 +#define IA64_PSR_MC_BIT 35 +#define IA64_PSR_IT_BIT 36 +#define IA64_PSR_ID_BIT 37 +#define IA64_PSR_DA_BIT 38 +#define IA64_PSR_DD_BIT 39 +#define IA64_PSR_SS_BIT 40 +#define IA64_PSR_RI_BIT 41 +#define IA64_PSR_ED_BIT 43 +#define IA64_PSR_BN_BIT 44 + +#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT) +#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT) +#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT) +#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT) +#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT) +#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT) +#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT) +#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT) +#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT) +#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT) +#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT) +#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT) +#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT) +#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT) +#define 
IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT) +#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT) +#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT) +#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT) +#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT) +/* The following are not affected by save_flags()/restore_flags(): */ +#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT) +#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT) +#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT) +#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT) +#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT) +#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT) +#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT) +#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT) +#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT) +#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT) + +/* User mask bits: */ +#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH) + +/* Default Control Register */ +#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */ +#define IA64_DCR_BE_BIT 1 /* big-endian default */ +#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */ +#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */ +#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */ +#define IA64_DCR_DK_BIT 10 /* defer key miss faults */ +#define IA64_DCR_DX_BIT 11 /* defer key permission faults */ +#define IA64_DCR_DR_BIT 12 /* defer access right faults */ +#define IA64_DCR_DA_BIT 13 /* defer access bit faults */ +#define IA64_DCR_DD_BIT 14 /* defer debug faults */ + +#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT) +#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT) +#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT) +#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT) +#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT) +#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT) +#define IA64_DCR_DX (__IA64_UL(1) << 
IA64_DCR_DX_BIT) +#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT) +#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT) +#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT) + +/* Interrupt Status Register */ +#define IA64_ISR_X_BIT 32 /* execute access */ +#define IA64_ISR_W_BIT 33 /* write access */ +#define IA64_ISR_R_BIT 34 /* read access */ +#define IA64_ISR_NA_BIT 35 /* non-access */ +#define IA64_ISR_SP_BIT 36 /* speculative load exception */ +#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */ +#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */ +#define IA64_ISR_CODE_MASK 0xf + +#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT) +#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT) +#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT) +#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT) +#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT) +#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT) +#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT) + +/* ISR code field for non-access instructions */ +#define IA64_ISR_CODE_TPA 0 +#define IA64_ISR_CODE_FC 1 +#define IA64_ISR_CODE_PROBE 2 +#define IA64_ISR_CODE_TAK 3 +#define IA64_ISR_CODE_LFETCH 4 +#define IA64_ISR_CODE_PROBEF 5 #endif /* _ASM_IA64_kREGS_H */ diff -Nru a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h --- a/include/asm-ia64/machvec.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/machvec.h Thu May 30 21:28:58 2002 @@ -18,6 +18,7 @@ struct pt_regs; struct scatterlist; struct irq_desc; +struct page; typedef void ia64_mv_setup_t (char **); typedef void ia64_mv_cpu_init_t(void); @@ -45,6 +46,8 @@ typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int); typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int); typedef unsigned long ia64_mv_pci_dma_address (struct scatterlist *); +typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64); + /* * WARNING: The legacy I/O space is 
_architected_. Platforms are * expected to follow this architected model (see Section 10.7 in the @@ -101,6 +104,7 @@ # define platform_pci_dma_sync_single ia64_mv.sync_single # define platform_pci_dma_sync_sg ia64_mv.sync_sg # define platform_pci_dma_address ia64_mv.dma_address +# define platform_pci_dma_supported ia64_mv.dma_supported # define platform_irq_desc ia64_mv.irq_desc # define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq @@ -136,6 +140,7 @@ ia64_mv_pci_dma_sync_single *sync_single; ia64_mv_pci_dma_sync_sg *sync_sg; ia64_mv_pci_dma_address *dma_address; + ia64_mv_pci_dma_supported *dma_supported; ia64_mv_irq_desc *irq_desc; ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_local_vector_to_irq *local_vector_to_irq; @@ -172,6 +177,7 @@ platform_pci_dma_sync_single, \ platform_pci_dma_sync_sg, \ platform_pci_dma_address, \ + platform_pci_dma_supported, \ platform_irq_desc, \ platform_irq_to_vector, \ platform_local_vector_to_irq, \ @@ -268,6 +274,9 @@ #endif #ifndef platform_pci_dma_address # define platform_pci_dma_address swiotlb_dma_address +#endif +#ifndef platform_pci_dma_supported +# define platform_pci_dma_supported swiotlb_pci_dma_supported #endif #ifndef platform_irq_desc # define platform_irq_desc __ia64_irq_desc diff -Nru a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h --- a/include/asm-ia64/machvec_hpzx1.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/machvec_hpzx1.h Thu May 30 21:28:58 2002 @@ -11,6 +11,7 @@ extern ia64_mv_pci_map_sg sba_map_sg; extern ia64_mv_pci_unmap_sg sba_unmap_sg; extern ia64_mv_pci_dma_address sba_dma_address; +extern ia64_mv_pci_dma_supported sba_dma_supported; /* * This stuff has dual use! 
@@ -33,42 +34,6 @@ #define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) #define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) #define platform_pci_dma_address sba_dma_address - -#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ -#ifndef _ASM_IA64_MACHVEC_HPZX1_h -#define _ASM_IA64_MACHVEC_HPZX1_h - -extern ia64_mv_setup_t dig_setup; -extern ia64_mv_pci_fixup_t hpzx1_pci_fixup; -extern ia64_mv_map_nr_t map_nr_dense; -extern ia64_mv_pci_alloc_consistent sba_alloc_consistent; -extern ia64_mv_pci_free_consistent sba_free_consistent; -extern ia64_mv_pci_map_single sba_map_single; -extern ia64_mv_pci_unmap_single sba_unmap_single; -extern ia64_mv_pci_map_sg sba_map_sg; -extern ia64_mv_pci_unmap_sg sba_unmap_sg; -extern ia64_mv_pci_dma_address sba_dma_address; - -/* - * This stuff has dual use! - * - * For a generic kernel, the macros are used to initialize the - * platform's machvec structure. When compiling a non-generic kernel, - * the macros are used directly. 
- */ -#define platform_name "hpzx1" -#define platform_setup dig_setup -#define platform_pci_fixup hpzx1_pci_fixup -#define platform_map_nr map_nr_dense -#define platform_pci_dma_init ((ia64_mv_pci_dma_init *) machvec_noop) -#define platform_pci_alloc_consistent sba_alloc_consistent -#define platform_pci_free_consistent sba_free_consistent -#define platform_pci_map_single sba_map_single -#define platform_pci_unmap_single sba_unmap_single -#define platform_pci_map_sg sba_map_sg -#define platform_pci_unmap_sg sba_unmap_sg -#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) -#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) -#define platform_pci_dma_address sba_dma_address +#define platform_pci_dma_supported sba_dma_supported #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff -Nru a/include/asm-ia64/offsets.h b/include/asm-ia64/offsets.h --- a/include/asm-ia64/offsets.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/offsets.h Thu May 30 21:28:59 2002 @@ -6,6 +6,8 @@ * This file was generated by arch/ia64/tools/print_offsets.awk. 
* */ + +#define CLONE_IDLETASK_BIT 12 #define IA64_TASK_SIZE 3952 /* 0xf70 */ #define IA64_THREAD_INFO_SIZE 32 /* 0x20 */ #define IA64_PT_REGS_SIZE 400 /* 0x190 */ @@ -16,6 +18,7 @@ #define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */ #define IA64_TASK_THREAD_KSP_OFFSET 1496 /* 0x5d8 */ +#define IA64_TASK_PID_OFFSET 212 /* 0xd4 */ #define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */ #define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */ #define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */ @@ -106,6 +109,7 @@ #define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536 /* 0x218 */ #define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544 /* 0x220 */ #define IA64_SWITCH_STACK_PR_OFFSET 552 /* 0x228 */ +#define IA64_SIGCONTEXT_IP_OFFSET 40 /* 0x28 */ #define IA64_SIGCONTEXT_AR_BSP_OFFSET 72 /* 0x48 */ #define IA64_SIGCONTEXT_AR_FPSR_OFFSET 104 /* 0x68 */ #define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80 /* 0x50 */ diff -Nru a/include/asm-ia64/page.h b/include/asm-ia64/page.h --- a/include/asm-ia64/page.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/page.h Thu May 30 21:28:58 2002 @@ -56,32 +56,12 @@ flush_dcache_page(page); \ } while (0) -/* - * Note: the MAP_NR_*() macro can't use __pa() because MAP_NR_*(X) MUST - * map to something >= max_mapnr if X is outside the identity mapped - * kernel space. - */ - -/* - * The dense variant can be used as long as the size of memory holes isn't - * very big. 
- */ -#define MAP_NR_DENSE(addr) (((unsigned long) (addr) - PAGE_OFFSET) >> PAGE_SHIFT) - -#ifdef CONFIG_IA64_GENERIC -# include -# define virt_to_page(kaddr) (mem_map + platform_map_nr(kaddr)) -# define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) -#elif defined (CONFIG_IA64_SGI_SN1) -# ifndef CONFIG_DISCONTIGMEM -# define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr)) -# define page_to_phys(page) XXX fix me -# endif -#else -# define virt_to_page(kaddr) (mem_map + MAP_NR_DENSE(kaddr)) -# define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) -#endif -#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define page_to_pfn(page) ((unsigned long) (page - mem_map)) +#define pfn_to_page(pfn) (mem_map + (pfn)) +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) typedef union ia64_va { struct { @@ -105,7 +85,7 @@ #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) #define REGION_SIZE REGION_NUMBER(1) -#define REGION_KERNEL 7 +#define REGION_KERNEL 7 #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0) #define PAGE_BUG(page) do { BUG(); } while (0) diff -Nru a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h --- a/include/asm-ia64/pci.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/pci.h Thu May 30 21:28:59 2002 @@ -58,6 +58,7 @@ #define pci_dma_sync_single platform_pci_dma_sync_single #define pci_dma_sync_sg platform_pci_dma_sync_sg #define sg_dma_address platform_pci_dma_address +#define pci_dma_supported platform_pci_dma_supported /* pci_unmap_{single,page} is not a nop, thus... 
*/ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ @@ -72,17 +73,6 @@ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) - -/* - * Return whether the given PCI device DMA address mask can be supported properly. For - * example, if your device can only drive the low 24-bits during PCI bus mastering, then - * you would pass 0x00ffffff as the mask to this function. - */ -static inline int -pci_dma_supported (struct pci_dev *hwdev, u64 mask) -{ - return 1; -} #define pci_map_page(dev,pg,off,size,dir) \ pci_map_single((dev), page_address(pg) + (off), (size), (dir)) diff -Nru a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h --- a/include/asm-ia64/perfmon.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/perfmon.h Thu May 30 21:28:59 2002 @@ -23,6 +23,7 @@ #define PFM_GET_FEATURES 0x0c #define PFM_DEBUG 0x0d #define PFM_UNPROTECT_CONTEXT 0x0e +#define PFM_GET_PMC_RESET_VAL 0x0f /* @@ -171,7 +172,7 @@ extern int pfm_release_debug_registers(struct task_struct *); extern int pfm_cleanup_smpl_buf(struct task_struct *); extern void pfm_syst_wide_update_task(struct task_struct *, int); -extern void pfm_ovfl_block_reset (void); +extern void pfm_ovfl_block_reset(void); #endif /* __KERNEL__ */ diff -Nru a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h --- a/include/asm-ia64/pgalloc.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/pgalloc.h Thu May 30 21:28:58 2002 @@ -106,6 +106,8 @@ ++pgtable_cache_size; } +#define pmd_free_tlb(tlb, pmd) pmd_free(pmd) + static inline void pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte) { @@ -149,6 +151,8 @@ { free_page((unsigned long) pte); } + +#define pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) extern void check_pgt_cache (void); diff -Nru a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h --- a/include/asm-ia64/pgtable.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/pgtable.h Thu May 30 21:28:59 2002 @@ -207,20 +207,16 @@ #define VMALLOC_END 
(0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) /* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. + * Conversion functions: convert page frame number (pfn) and a protection value to a page + * table entry (pte). */ -#define mk_pte(page,pgprot) \ -({ \ - pte_t __pte; \ - \ - pte_val(__pte) = (page_to_phys(page)) | pgprot_val(pgprot); \ - __pte; \ -}) +#define pfn_pte(pfn, pgprot) \ +({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; }) -/* This takes a physical page address that is used by the remapping functions */ -#define mk_pte_phys(physpage, pgprot) \ -({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; }) +/* Extract pfn from pte. */ +#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT) + +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #define pte_modify(_pte, newprot) \ (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h --- a/include/asm-ia64/processor.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/processor.h Thu May 30 21:28:59 2002 @@ -55,123 +55,6 @@ #define MCA_bus 0 #define MCA_bus__is_a_macro /* for versions in ksyms.c */ -/* Processor status register bits: */ -#define IA64_PSR_BE_BIT 1 -#define IA64_PSR_UP_BIT 2 -#define IA64_PSR_AC_BIT 3 -#define IA64_PSR_MFL_BIT 4 -#define IA64_PSR_MFH_BIT 5 -#define IA64_PSR_IC_BIT 13 -#define IA64_PSR_I_BIT 14 -#define IA64_PSR_PK_BIT 15 -#define IA64_PSR_DT_BIT 17 -#define IA64_PSR_DFL_BIT 18 -#define IA64_PSR_DFH_BIT 19 -#define IA64_PSR_SP_BIT 20 -#define IA64_PSR_PP_BIT 21 -#define IA64_PSR_DI_BIT 22 -#define IA64_PSR_SI_BIT 23 -#define IA64_PSR_DB_BIT 24 -#define IA64_PSR_LP_BIT 25 -#define IA64_PSR_TB_BIT 26 -#define IA64_PSR_RT_BIT 27 -/* The following are not affected by save_flags()/restore_flags(): */ -#define IA64_PSR_CPL0_BIT 32 -#define 
IA64_PSR_CPL1_BIT 33 -#define IA64_PSR_IS_BIT 34 -#define IA64_PSR_MC_BIT 35 -#define IA64_PSR_IT_BIT 36 -#define IA64_PSR_ID_BIT 37 -#define IA64_PSR_DA_BIT 38 -#define IA64_PSR_DD_BIT 39 -#define IA64_PSR_SS_BIT 40 -#define IA64_PSR_RI_BIT 41 -#define IA64_PSR_ED_BIT 43 -#define IA64_PSR_BN_BIT 44 - -#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT) -#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT) -#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT) -#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT) -#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT) -#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT) -#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT) -#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT) -#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT) -#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT) -#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT) -#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT) -#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT) -#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT) -#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT) -#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT) -#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT) -#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT) -#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT) -/* The following are not affected by save_flags()/restore_flags(): */ -#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT) -#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT) -#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT) -#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT) -#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT) -#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT) -#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT) -#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT) -#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT) -#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT) - -/* User mask bits: */ 
-#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH) - -/* Default Control Register */ -#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */ -#define IA64_DCR_BE_BIT 1 /* big-endian default */ -#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */ -#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */ -#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */ -#define IA64_DCR_DK_BIT 10 /* defer key miss faults */ -#define IA64_DCR_DX_BIT 11 /* defer key permission faults */ -#define IA64_DCR_DR_BIT 12 /* defer access right faults */ -#define IA64_DCR_DA_BIT 13 /* defer access bit faults */ -#define IA64_DCR_DD_BIT 14 /* defer debug faults */ - -#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT) -#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT) -#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT) -#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT) -#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT) -#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT) -#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT) -#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT) -#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT) -#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT) - -/* Interrupt Status Register */ -#define IA64_ISR_X_BIT 32 /* execute access */ -#define IA64_ISR_W_BIT 33 /* write access */ -#define IA64_ISR_R_BIT 34 /* read access */ -#define IA64_ISR_NA_BIT 35 /* non-access */ -#define IA64_ISR_SP_BIT 36 /* speculative load exception */ -#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */ -#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */ -#define IA64_ISR_CODE_MASK 0xf - -#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT) -#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT) -#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT) -#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT) -#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT) -#define 
IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT) -#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT) - -/* ISR code field for non-access instructions */ -#define IA64_ISR_CODE_TPA 0 -#define IA64_ISR_CODE_FC 1 -#define IA64_ISR_CODE_PROBE 2 -#define IA64_ISR_CODE_TAK 3 -#define IA64_ISR_CODE_LFETCH 4 -#define IA64_ISR_CODE_PROBEF 5 - #define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */ #define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */ #define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */ @@ -290,8 +173,6 @@ __u64 ipi_count; __u64 prof_counter; __u64 prof_multiplier; - __u32 pfm_syst_wide; - __u32 pfm_dcr_pp; #endif } cpu_info __per_cpu_data; @@ -632,14 +513,22 @@ asm volatile ("invala" ::: "memory"); } +static inline __u64 +ia64_clear_ic (void) +{ + __u64 psr; + asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory"); + return psr; +} + /* - * Save the processor status flags in FLAGS and then clear the interrupt collection and - * interrupt enable bits. Don't trigger any mandatory RSE references while this bit is - * off! + * Restore the psr. 
*/ -#define ia64_clear_ic(flags) \ - asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" \ - : "=r"(flags) :: "memory"); +static inline void +ia64_set_psr (__u64 psr) +{ + asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory"); +} /* * Insert a translation into an instruction and/or data translation diff -Nru a/include/asm-ia64/system.h b/include/asm-ia64/system.h --- a/include/asm-ia64/system.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/system.h Thu May 30 21:28:59 2002 @@ -13,7 +13,9 @@ * Copyright (C) 1999 Don Dugger */ #include +#include +#include #include #define KERNEL_START (PAGE_OFFSET + 68*1024*1024) @@ -30,7 +32,7 @@ __u16 bus; /* PCI Bus number */ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ - __u8 irq; /* IRQ assigned */ + __u32 irq; /* IRQ assigned */ }; extern struct ia64_boot_param { @@ -135,16 +137,21 @@ } \ } while (0) -# define local_irq_restore(x) \ -do { \ - unsigned long ip, old_psr, psr = (x); \ - \ - __asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d" \ - : "=&r" (old_psr) : "r" (psr) : "memory"); \ - if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \ - __asm__ ("mov %0=ip" : "=r"(ip)); \ - last_cli_ip = ip; \ - } \ +# define local_irq_restore(x) \ +do { \ + unsigned long ip, old_psr, psr = (x); \ + \ + __asm__ __volatile__ ("mov %0=psr;" \ + "cmp.ne p6,p7=%1,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "srlz.d" \ + : "=&r" (old_psr) : "r"((psr) & IA64_PSR_I) \ + : "p6", "p7", "memory"); \ + if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) { \ + __asm__ ("mov %0=ip" : "=r"(ip)); \ + last_cli_ip = ip; \ + } \ } while (0) #else /* !CONFIG_IA64_DEBUG_IRQ */ @@ -153,8 +160,12 @@ : "=r" (x) :: "memory") # define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") /* (potentially) setting psr.i requires data serialization: */ -# define local_irq_restore(x) __asm__ __volatile__ (";; mov 
psr.l=%0;; srlz.d" \ - :: "r" (x) : "memory") +# define local_irq_restore(x) __asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "srlz.d" \ + :: "r"((x) & IA64_PSR_I) \ + : "p6", "p7", "memory") #endif /* !CONFIG_IA64_DEBUG_IRQ */ #define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory") @@ -376,7 +387,8 @@ extern void ia64_load_extra (struct task_struct *task); #if defined(CONFIG_SMP) && defined(CONFIG_PERFMON) -# define PERFMON_IS_SYSWIDE() (local_cpu_data->pfm_syst_wide != 0) + extern int __per_cpu_data pfm_syst_wide; +# define PERFMON_IS_SYSWIDE() (this_cpu(pfm_syst_wide) != 0) #else # define PERFMON_IS_SYSWIDE() (0) #endif diff -Nru a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h --- a/include/asm-ia64/tlb.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/tlb.h Thu May 30 21:28:59 2002 @@ -1 +1,179 @@ -#include +#ifndef _ASM_IA64_TLB_H +#define _ASM_IA64_TLB_H +/* + * Copyright (C) 2002 Hewlett-Packard Co + * David Mosberger-Tang + * + * This file was derived from asm-generic/tlb.h. + */ +/* + * Removing a translation from a page table (including TLB-shootdown) is a four-step + * procedure: + * + * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory + * (this is a no-op on ia64). + * (2) Clear the relevant portions of the page-table + * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs + * (4) Release the pages that were freed up in step (2). + * + * Note that the ordering of these steps is crucial to avoid races on MP machines. + * + * The Linux kernel defines several platform-specific hooks for TLB-shootdown. 
When + * unmapping a portion of the virtual address space, these hooks are called according to + * the following template: + * + * tlb <- tlb_gather_mmu(mm); // start unmap for address space MM + * { + * for each vma that needs a shootdown do { + * tlb_start_vma(tlb, vma); + * for each page-table-entry PTE that needs to be removed do { + * tlb_remove_tlb_entry(tlb, pte, address); + * if (pte refers to a normal page) { + * tlb_remove_page(tlb, page); + * } + * } + * tlb_end_vma(tlb, vma); + * } + * } + * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM + */ +#include +#include + +#include +#include + +#ifdef CONFIG_SMP +# define FREE_PTE_NR 2048 +# define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL) +#else +# define FREE_PTE_NR 0 +# define tlb_fast_mode(tlb) (1) +#endif + +typedef struct { + struct mm_struct *mm; + unsigned long nr; /* == ~0UL => fast mode */ + unsigned long freed; /* number of pages freed */ + unsigned long start_addr; + unsigned long end_addr; + struct page *pages[FREE_PTE_NR]; +} mmu_gather_t; + +/* Users of the generic TLB shootdown code must declare this storage space. */ +extern mmu_gather_t mmu_gathers[NR_CPUS]; + +/* + * Flush the TLB for address range START to END and, if not in fast mode, release the + * freed pages that where gathered up to this point. + */ +static inline void +ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end) +{ + unsigned long nr; + + if (end - start >= 1024*1024*1024*1024UL) { + /* + * If we flush more than a tera-byte, we're probably better off just + * flushing the entire address space. + */ + flush_tlb_mm(tlb->mm); + } else { + /* + * XXX fix me: flush_tlb_range() should take an mm pointer instead of a + * vma pointer. + */ + struct vm_area_struct vma; + + vma.vm_mm = tlb->mm; + /* flush the address range from the tlb: */ + flush_tlb_range(&vma, start, end); + /* now flush the virt. 
page-table area mapping the address range: */ + flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end)); + } + + /* lastly, release the freed pages */ + nr = tlb->nr; + if (!tlb_fast_mode(tlb)) { + unsigned long i; + tlb->nr = 0; + tlb->start_addr = ~0UL; + for (i = 0; i < nr; ++i) + free_page_and_swap_cache(tlb->pages[i]); + } +} + +/* + * Return a pointer to an initialized mmu_gather_t. + */ +static inline mmu_gather_t * +tlb_gather_mmu (struct mm_struct *mm) +{ + mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()]; + + tlb->mm = mm; + tlb->freed = 0; + tlb->start_addr = ~0UL; + + /* Use fast mode if only one CPU is online */ + tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL; + return tlb; +} + +/* + * Called at the end of the shootdown operation to free up any resources that were + * collected. The page table lock is still held at this point. + */ +static inline void +tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end) +{ + unsigned long freed = tlb->freed; + struct mm_struct *mm = tlb->mm; + unsigned long rss = mm->rss; + + if (rss < freed) + freed = rss; + mm->rss = rss - freed; + /* + * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and + * tlb->end_addr. + */ + ia64_tlb_flush_mmu(tlb, start, end); + + /* keep the page table cache within bounds */ + check_pgt_cache(); +} + +/* + * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any + * PTE, not just those pointing to (normal) physical memory. + */ +static inline void +tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t pte, unsigned long address) +{ + if (tlb->start_addr == ~0UL) + tlb->start_addr = address; + tlb->end_addr = address + PAGE_SIZE; +} + +/* + * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page + * must be delayed until after the TLB has been flushed (see comments at the beginning of + * this file). 
+ */ +static inline void +tlb_remove_page (mmu_gather_t *tlb, struct page *page) +{ + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + return; + } + tlb->pages[tlb->nr++] = page; + if (tlb->nr >= FREE_PTE_NR) + ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); +} + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) + +#endif /* _ASM_IA64_TLB_H */ diff -Nru a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h --- a/include/asm-ia64/tlbflush.h Thu May 30 21:28:59 2002 +++ b/include/asm-ia64/tlbflush.h Thu May 30 21:28:59 2002 @@ -70,12 +70,10 @@ static inline void flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end) { - struct vm_area_struct vma; - - if (REGION_NUMBER(start) != REGION_NUMBER(end)) - printk("flush_tlb_pgtables: can't flush across regions!!\n"); - vma.vm_mm = mm; - flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end)); + /* + * Deprecated. The virtual page table is now flushed via the normal gather/flush + * interface (see tlb.h). + */ } #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ diff -Nru a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h --- a/include/asm-ia64/unistd.h Thu May 30 21:28:58 2002 +++ b/include/asm-ia64/unistd.h Thu May 30 21:28:58 2002 @@ -222,6 +222,7 @@ #define __NR_futex 1230 #define __NR_sched_setaffinity 1231 #define __NR_sched_getaffinity 1232 +#define __NR_security 1233 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) diff -Nru a/include/linux/acpi.h b/include/linux/acpi.h --- a/include/linux/acpi.h Thu May 30 21:28:58 2002 +++ b/include/linux/acpi.h Thu May 30 21:28:58 2002 @@ -29,6 +29,8 @@ #define _LINUX #endif +#include + /* * YES this is ugly. 
* But, moving all of ACPI's private headers to include/acpi isn't the right diff -Nru a/include/linux/agp_backend.h b/include/linux/agp_backend.h --- a/include/linux/agp_backend.h Thu May 30 21:28:59 2002 +++ b/include/linux/agp_backend.h Thu May 30 21:28:59 2002 @@ -52,6 +52,7 @@ INTEL_I845, INTEL_I850, INTEL_I860, + INTEL_460GX, VIA_GENERIC, VIA_VP3, VIA_MVP3, @@ -117,6 +118,7 @@ size_t page_count; int num_scratch_pages; unsigned long *memory; + void *vmptr; off_t pg_start; u32 type; u32 physical; diff -Nru a/include/linux/fs.h b/include/linux/fs.h --- a/include/linux/fs.h Thu May 30 21:28:59 2002 +++ b/include/linux/fs.h Thu May 30 21:28:59 2002 @@ -506,12 +506,17 @@ extern int init_private_file(struct file *, struct dentry *, int); +/* Max fileoffset that can safely be dealt with by filesystems that have not (yet) been + audited for 64-bit issues. */ #define MAX_NON_LFS ((1UL<<31) - 1) -/* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. */ +/* Max fileoffset that can be stored in a variable of type off_t. */ +#define MAX_OFF_T ((loff_t)((1UL << ((sizeof(off_t)*8) - 1)) - 1)) + +/* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM.
*/ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE 0x7fffffffffffffff #endif diff -Nru a/include/linux/highmem.h b/include/linux/highmem.h --- a/include/linux/highmem.h Thu May 30 21:28:58 2002 +++ b/include/linux/highmem.h Thu May 30 21:28:58 2002 @@ -77,7 +77,7 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_atomic(page, KM_USER0); - clear_user_page(addr, vaddr); + clear_user_page(addr, vaddr, page); kunmap_atomic(addr, KM_USER0); } @@ -111,7 +111,7 @@ vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); - copy_user_page(vto, vfrom, vaddr); + copy_user_page(vto, vfrom, vaddr, to); kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); } diff -Nru a/include/linux/irq.h b/include/linux/irq.h --- a/include/linux/irq.h Thu May 30 21:28:59 2002 +++ b/include/linux/irq.h Thu May 30 21:28:59 2002 @@ -56,15 +56,13 @@ * * Pad this out to 32 bytes for cache and indexing reasons. 
*/ -typedef struct { +typedef struct irq_desc { unsigned int status; /* IRQ status */ hw_irq_controller *handler; struct irqaction *action; /* IRQ action list */ unsigned int depth; /* nested irq disables */ spinlock_t lock; } ____cacheline_aligned irq_desc_t; - -extern irq_desc_t irq_desc [NR_IRQS]; #include /* the arch dependent stuff */ diff -Nru a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h --- a/include/linux/irq_cpustat.h Thu May 30 21:28:59 2002 +++ b/include/linux/irq_cpustat.h Thu May 30 21:28:59 2002 @@ -24,16 +24,32 @@ #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #else #define __IRQ_STAT(cpu, member) ((void)(cpu), irq_stat[0].member) -#endif +#endif #endif /* arch independent irq_stat fields */ #define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending) -#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count) -#define local_bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count) +#define irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count) +#define bh_count(cpu) __IRQ_STAT((cpu), __local_bh_count) #define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count) #define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task) /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ + +#define local_hardirq_trylock() hardirq_trylock(smp_processor_id()) +#define local_hardirq_endlock() hardirq_trylock(smp_processor_id()) +#define local_irq_enter(irq) irq_enter(smp_processor_id(), (irq)) +#define local_irq_exit(irq) irq_exit(smp_processor_id(), (irq)) +#define local_softirq_pending() softirq_pending(smp_processor_id()) +#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id()) + +/* These will lose the "really_" prefix when the interim macros below are removed. */ +#define really_local_irq_count()bh_count(smp_processor_id()) +#define really_local_bh_count() bh_count(smp_processor_id()) + +/* Interim macros for backward compatibility. They are deprecated. 
Use irq_count() and + bh_count() instead. --davidm 01/11/28 */ +#define local_irq_count(cpu) irq_count(cpu) +#define local_bh_count(cpu) bh_count(cpu) #endif /* __irq_cpustat_h */ diff -Nru a/include/linux/kernel.h b/include/linux/kernel.h --- a/include/linux/kernel.h Thu May 30 21:28:58 2002 +++ b/include/linux/kernel.h Thu May 30 21:28:58 2002 @@ -37,6 +37,13 @@ #define KERN_INFO "<6>" /* informational */ #define KERN_DEBUG "<7>" /* debug-level messages */ +extern int console_printk[]; + +#define console_loglevel (console_printk[0]) +#define default_message_loglevel (console_printk[1]) +#define minimum_console_loglevel (console_printk[2]) +#define default_console_loglevel (console_printk[3]) + struct completion; extern struct notifier_block *panic_notifier_list; @@ -72,8 +79,6 @@ asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))); - -extern int console_loglevel; static inline void console_silent(void) { diff -Nru a/include/linux/percpu.h b/include/linux/percpu.h --- a/include/linux/percpu.h Thu May 30 21:28:59 2002 +++ b/include/linux/percpu.h Thu May 30 21:28:59 2002 @@ -2,11 +2,11 @@ #define __LINUX_PERCPU_H #include -#ifdef CONFIG_SMP #define __per_cpu_data __attribute__((section(".data.percpu"))) + +#ifdef CONFIG_SMP #include #else -#define __per_cpu_data #define per_cpu(var, cpu) var #define this_cpu(var) var #endif diff -Nru a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h Thu May 30 21:28:58 2002 +++ b/include/linux/sched.h Thu May 30 21:28:58 2002 @@ -430,14 +430,14 @@ #ifndef INIT_THREAD_SIZE # define INIT_THREAD_SIZE 2048*sizeof(long) -#endif - union thread_union { struct thread_info thread_info; unsigned long stack[INIT_THREAD_SIZE/sizeof(long)]; }; extern union thread_union init_thread_union; +#endif + extern struct task_struct init_task; extern struct mm_struct init_mm; diff -Nru a/include/linux/smp.h b/include/linux/smp.h --- a/include/linux/smp.h Thu May 30 21:28:59 2002 +++ 
b/include/linux/smp.h Thu May 30 21:28:59 2002 @@ -37,11 +37,6 @@ extern void smp_boot_cpus(void); /* - * Processor call in. Must hold processors until .. - */ -extern void smp_callin(void); - -/* * Multiprocessors may now schedule */ extern void smp_commence(void); @@ -55,14 +50,10 @@ /* * True once the per process idle is forked */ -extern int smp_threads_ready; +extern volatile int smp_threads_ready; extern int smp_num_cpus; -extern volatile unsigned long smp_msg_data; -extern volatile int smp_src_cpu; -extern volatile int smp_msg_id; - #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 @@ -92,9 +83,6 @@ #define cpu_online_map 1 static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule_all(void) { } -#define __per_cpu_data -#define per_cpu(var, cpu) var -#define this_cpu(var) var #endif /* !SMP */ diff -Nru a/kernel/exec_domain.c b/kernel/exec_domain.c --- a/kernel/exec_domain.c Thu May 30 21:28:59 2002 +++ b/kernel/exec_domain.c Thu May 30 21:28:59 2002 @@ -196,8 +196,10 @@ put_exec_domain(oep); +#if 0 printk(KERN_DEBUG "[%s:%d]: set personality to %lx\n", current->comm, current->pid, personality); +#endif return 0; } diff -Nru a/kernel/fork.c b/kernel/fork.c --- a/kernel/fork.c Thu May 30 21:28:58 2002 +++ b/kernel/fork.c Thu May 30 21:28:58 2002 @@ -98,6 +98,13 @@ init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2; } +#if 1 + +extern struct task_struct *dup_task_struct(struct task_struct *orig); +extern void __put_task_struct(struct task_struct *tsk); + +#else + struct task_struct *dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; @@ -126,6 +133,8 @@ free_thread_info(tsk->thread_info); kmem_cache_free(task_struct_cachep,tsk); } + +#endif /* Protects next_safe and last_pid. 
*/ spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED; diff -Nru a/kernel/ksyms.c b/kernel/ksyms.c --- a/kernel/ksyms.c Thu May 30 21:28:58 2002 +++ b/kernel/ksyms.c Thu May 30 21:28:58 2002 @@ -384,7 +384,7 @@ EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); -#if !defined(CONFIG_ARCH_S390) +#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_IA64) EXPORT_SYMBOL(irq_stat); /* No separate irq_stat for s390, it is part of PSA */ #endif @@ -591,7 +591,9 @@ /* init task, for moving kthread roots - ought to export a function ?? */ EXPORT_SYMBOL(init_task); +#ifndef CONFIG_IA64 EXPORT_SYMBOL(init_thread_union); +#endif EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(pidhash); diff -Nru a/kernel/printk.c b/kernel/printk.c --- a/kernel/printk.c Thu May 30 21:28:59 2002 +++ b/kernel/printk.c Thu May 30 21:28:59 2002 @@ -16,6 +16,7 @@ * 01Mar01 Andrew Morton */ +#include #include #include #include @@ -52,11 +53,12 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait); -/* Keep together for sysctl support */ -int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL; -int default_message_loglevel = DEFAULT_MESSAGE_LOGLEVEL; -int minimum_console_loglevel = MINIMUM_CONSOLE_LOGLEVEL; -int default_console_loglevel = DEFAULT_CONSOLE_LOGLEVEL; +int console_printk[4] = { + DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ + DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ + MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */ + DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ +}; int oops_in_progress; @@ -323,6 +325,12 @@ __call_console_drivers(start, end); } } +#ifdef CONFIG_IA64_EARLY_PRINTK + if (!console_drivers) { + static void early_printk (const char *str, size_t len); + early_printk(&LOG_BUF(start), end - start); + } +#endif } /* @@ -682,3 +690,50 @@ tty->driver.write(tty, 0, msg, strlen(msg)); return; } + +#ifdef CONFIG_IA64_EARLY_PRINTK + +#include + +#define VGABASE ((char *)0xc0000000000b8000) +#define VGALINES 24 +#define VGACOLS 80 + +static int current_ypos 
= VGALINES, current_xpos = 0; + +void +early_printk (const char *str, size_t len) +{ + char c; + int i, k, j; + + while (len-- > 0) { + c = *str++; + if (current_ypos >= VGALINES) { + /* scroll 1 line up */ + for (k = 1, j = 0; k < VGALINES; k++, j++) { + for (i = 0; i < VGACOLS; i++) { + writew(readw(VGABASE + 2*(VGACOLS*k + i)), + VGABASE + 2*(VGACOLS*j + i)); + } + } + for (i = 0; i < VGACOLS; i++) { + writew(0x720, VGABASE + 2*(VGACOLS*j + i)); + } + current_ypos = VGALINES-1; + } + if (c == '\n') { + current_xpos = 0; + current_ypos++; + } else if (c != '\r') { + writew(((0x7 << 8) | (unsigned short) c), + VGABASE + 2*(VGACOLS*current_ypos + current_xpos++)); + if (current_xpos >= VGACOLS) { + current_xpos = 0; + current_ypos++; + } + } + } +} + +#endif /* CONFIG_IA64_EARLY_PRINTK */ diff -Nru a/kernel/sched.c b/kernel/sched.c --- a/kernel/sched.c Thu May 30 21:28:59 2002 +++ b/kernel/sched.c Thu May 30 21:28:59 2002 @@ -663,7 +663,7 @@ task_t *p = current; if (p == rq->idle) { - if (local_bh_count(cpu) || local_irq_count(cpu) > 1) + if (really_local_bh_count() || really_local_irq_count() > 1) kstat.per_cpu_system[cpu] += system; #if CONFIG_SMP idle_tick(); diff -Nru a/kernel/softirq.c b/kernel/softirq.c --- a/kernel/softirq.c Thu May 30 21:28:59 2002 +++ b/kernel/softirq.c Thu May 30 21:28:59 2002 @@ -41,7 +41,10 @@ - Bottom halves: globally serialized, grr... 
*/ +/* No separate irq_stat for ia64, it is part of PSA */ +#if !defined(CONFIG_IA64) irq_cpustat_t irq_stat[NR_CPUS]; +#endif /* CONFIG_IA64 */ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; @@ -61,7 +64,6 @@ asmlinkage void do_softirq() { - int cpu = smp_processor_id(); __u32 pending; long flags; __u32 mask; @@ -71,7 +73,7 @@ local_irq_save(flags); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending) { struct softirq_action *h; @@ -80,7 +82,7 @@ local_bh_disable(); restart: /* Reset the pending bitmask before enabling irqs */ - softirq_pending(cpu) = 0; + local_softirq_pending() = 0; local_irq_enable(); @@ -95,7 +97,7 @@ local_irq_disable(); - pending = softirq_pending(cpu); + pending = local_softirq_pending(); if (pending & mask) { mask &= ~pending; goto restart; @@ -103,7 +105,7 @@ __local_bh_enable(); if (pending) - wakeup_softirqd(cpu); + wakeup_softirqd(smp_processor_id()); } local_irq_restore(flags); @@ -125,7 +127,7 @@ * Otherwise we wake up ksoftirqd to make sure we * schedule the softirq soon. 
*/ - if (!(local_irq_count(cpu) | local_bh_count(cpu))) + if (!(irq_count(cpu) | bh_count(cpu))) wakeup_softirqd(cpu); } @@ -288,18 +290,16 @@ static void bh_action(unsigned long nr) { - int cpu = smp_processor_id(); - if (!spin_trylock(&global_bh_lock)) goto resched; - if (!hardirq_trylock(cpu)) + if (!local_hardirq_trylock()) goto resched_unlock; if (bh_base[nr]) bh_base[nr](); - hardirq_endlock(cpu); + local_hardirq_endlock(); spin_unlock(&global_bh_lock); return; @@ -379,15 +379,15 @@ __set_current_state(TASK_INTERRUPTIBLE); mb(); - ksoftirqd_task(cpu) = current; + local_ksoftirqd_task() = current; for (;;) { - if (!softirq_pending(cpu)) + if (!local_softirq_pending()) schedule(); __set_current_state(TASK_RUNNING); - while (softirq_pending(cpu)) { + while (local_softirq_pending()) { do_softirq(); cond_resched(); } diff -Nru a/kernel/time.c b/kernel/time.c --- a/kernel/time.c Thu May 30 21:28:59 2002 +++ b/kernel/time.c Thu May 30 21:28:59 2002 @@ -39,6 +39,7 @@ /* The xtime_lock is not only serializing the xtime read/writes but it's also serializing all accesses to the global NTP variables now. 
*/ extern rwlock_t xtime_lock; +extern unsigned long last_time_offset; #if !defined(__alpha__) && !defined(__ia64__) @@ -82,6 +83,7 @@ write_lock_irq(&xtime_lock); xtime.tv_sec = value; xtime.tv_usec = 0; + last_time_offset = 0; time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; @@ -127,6 +129,7 @@ { write_lock_irq(&xtime_lock); xtime.tv_sec += sys_tz.tz_minuteswest * 60; + last_time_offset = 0; write_unlock_irq(&xtime_lock); } @@ -386,6 +389,7 @@ txc->calcnt = pps_calcnt; txc->errcnt = pps_errcnt; txc->stbcnt = pps_stbcnt; + last_time_offset = 0; write_unlock_irq(&xtime_lock); do_gettimeofday(&txc->time); return(result); diff -Nru a/kernel/timer.c b/kernel/timer.c --- a/kernel/timer.c Thu May 30 21:28:59 2002 +++ b/kernel/timer.c Thu May 30 21:28:59 2002 @@ -640,6 +640,7 @@ * This spinlock protect us from races in SMP while playing with xtime. -arca */ rwlock_t xtime_lock = RW_LOCK_UNLOCKED; +unsigned long last_time_offset; static inline void update_times(void) { @@ -657,6 +658,7 @@ wall_jiffies += ticks; update_wall_time(ticks); } + last_time_offset = 0; write_unlock_irq(&xtime_lock); calc_load(ticks); } @@ -890,7 +892,7 @@ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0) return -EINVAL; - +#if !defined(__ia64__) if (t.tv_sec == 0 && t.tv_nsec <= 2000000L && current->policy != SCHED_OTHER) { @@ -903,6 +905,7 @@ udelay((t.tv_nsec + 999) / 1000); return 0; } +#endif expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); diff -Nru a/lib/brlock.c b/lib/brlock.c --- a/lib/brlock.c Thu May 30 21:28:58 2002 +++ b/lib/brlock.c Thu May 30 21:28:58 2002 @@ -18,7 +18,7 @@ #ifdef __BRLOCK_USE_ATOMICS brlock_read_lock_t __brlock_array[NR_CPUS][__BR_IDX_MAX] = - { [0 ... NR_CPUS-1] = { [0 ... __BR_IDX_MAX-1] = RW_LOCK_UNLOCKED } }; + { [0 ... NR_CPUS-1] = { [0 ... 
__BR_IDX_MAX-1] = {0, 0} } }; void __br_write_lock (enum brlock_indices idx) { diff -Nru a/mm/filemap.c b/mm/filemap.c --- a/mm/filemap.c Thu May 30 21:28:59 2002 +++ b/mm/filemap.c Thu May 30 21:28:59 2002 @@ -33,6 +33,7 @@ #include #include +#include /* * Shared mappings implemented 30.11.1994. It's not fully working yet, @@ -1300,6 +1301,9 @@ return written; } +#ifndef __NR_sendfile64 +inline +#endif static ssize_t common_sendfile(int out_fd, int in_fd, loff_t *offset, size_t count, loff_t max) { ssize_t retval; @@ -1394,12 +1398,13 @@ pos = off; ppos = &pos; } - ret = common_sendfile(out_fd, in_fd, ppos, count, MAX_NON_LFS); + ret = common_sendfile(out_fd, in_fd, ppos, count, MAX_OFF_T); if (offset && put_user(pos, offset)) ret = -EFAULT; return ret; } +#ifdef __NR_sendfile64 asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count) { loff_t pos, *ppos = NULL; @@ -1414,6 +1419,7 @@ ret = -EFAULT; return ret; } +#endif static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr) { diff -Nru a/mm/memory.c b/mm/memory.c --- a/mm/memory.c Thu May 30 21:28:59 2002 +++ b/mm/memory.c Thu May 30 21:28:59 2002 @@ -106,7 +106,7 @@ pmd = pmd_offset(dir, 0); pgd_clear(dir); for (j = 0; j < PTRS_PER_PMD ; j++) { - prefetchw(pmd+j+(PREFETCH_STRIDE/16)); + prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)); free_one_pmd(tlb, pmd+j); } pmd_free_tlb(tlb, pmd); diff -Nru a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c --- a/sound/oss/cs4281/cs4281m.c Thu May 30 21:28:59 2002 +++ b/sound/oss/cs4281/cs4281m.c Thu May 30 21:28:59 2002 @@ -1942,8 +1942,8 @@ len -= x; } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO - "cs4281: clear_advance(): memset %d at 0x%.8x for %d size \n", - (unsigned)c, (unsigned)((char *) buf) + bptr, len)); + "cs4281: clear_advance(): memset %d at %p for %d size \n", + (unsigned)c, ((char *) buf) + bptr, len)); memset(((char *) buf) + bptr, c, len); } @@ -1978,9 +1978,8 @@ 
wake_up(&s->dma_adc.wait); } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned)s, s->dma_adc.hwptr, - s->dma_adc.total_bytes, s->dma_adc.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count)); } // update DAC pointer // @@ -2012,11 +2011,10 @@ // Continue to play silence until the _release. // CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): memset %d at 0x%.8x for %d size \n", + "cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n", (unsigned)(s->prop_dac.fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0, - (unsigned)s->dma_dac.rawbuf, - s->dma_dac.dmasize)); + s->dma_dac.rawbuf, s->dma_dac.dmasize)); memset(s->dma_dac.rawbuf, (s->prop_dac. fmt & (AFMT_U8 | AFMT_U16_LE)) ? @@ -2047,9 +2045,8 @@ } } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", - (unsigned) s, s->dma_dac.hwptr, - s->dma_dac.total_bytes, s->dma_dac.count)); + "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", + s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count)); } } @@ -2180,8 +2177,7 @@ VALIDATE_STATE(s); CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n", - (unsigned) s, cmd)); + "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -2746,9 +2742,8 @@ CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4281: CopySamples()+ ")); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " dst=0x%x src=0x%x count=%d iChannels=%d fmt=0x%x\n", - (unsigned) dst, (unsigned) src, (unsigned) count, - (unsigned) iChannels, (unsigned) fmt)); + " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n", + dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt)); // Gershwin does format conversion in hardware so normally // we don't do any 
host based coversion. The data formatter @@ -2828,9 +2823,9 @@ void *src = hwsrc; //default to the standard destination buffer addr CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO - "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=0x%.8x\n", + "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n", s->prop_adc.fmt, s->prop_adc.fmt_original, - (unsigned) cnt, (unsigned) dest)); + (unsigned) cnt, dest)); if (cnt > s->dma_adc.dmasize) { cnt = s->dma_adc.dmasize; @@ -2875,7 +2870,7 @@ unsigned copied = 0; CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()+ %d \n", count)); + printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count)); VALIDATE_STATE(s); if (ppos != &file->f_pos) @@ -2898,7 +2893,7 @@ // while (count > 0) { CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n", + "_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n", count, s->dma_adc.count, s->dma_adc.swptr, s->dma_adc.hwptr)); spin_lock_irqsave(&s->lock, flags); @@ -2955,11 +2950,10 @@ // the "cnt" is the number of bytes to read. 
CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO - "_read() copy_to cnt=%d count=%d ", cnt, count)); + "_read() copy_to cnt=%d count=%Zu ", cnt, count)); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " .dmasize=%d .count=%d buffer=0x%.8x ret=%d\n", - s->dma_adc.dmasize, s->dma_adc.count, - (unsigned) buffer, ret)); + " .dmasize=%d .count=%d buffer=%p ret=%Zd\n", + s->dma_adc.dmasize, s->dma_adc.count, buffer, ret)); if (cs_copy_to_user (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied)) @@ -2975,7 +2969,7 @@ start_adc(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret)); return ret; } @@ -2991,7 +2985,7 @@ int cnt; CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()+ count=%d\n", + printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n", count)); VALIDATE_STATE(s); @@ -3047,7 +3041,7 @@ start_dac(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()- %d\n", ret)); + printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret)); return ret; } @@ -3168,8 +3162,7 @@ int val, mapped, ret; CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: cs4281_ioctl(): file=0x%.8x cmd=0x%.8x\n", - (unsigned) file, cmd)); + "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -3599,8 +3592,8 @@ (struct cs4281_state *) file->private_data; CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO - "cs4281: cs4281_release(): inode=0x%.8x file=0x%.8x f_mode=%d\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n", + inode, file, file->f_mode)); VALIDATE_STATE(s); @@ -3634,8 +3627,8 @@ struct list_head *entry; CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO - "cs4281: cs4281_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n", - (unsigned) inode, (unsigned) file, file->f_mode)); + "cs4281: cs4281_open(): 
inode=%p file=%p f_mode=0x%x\n", + inode, file, file->f_mode)); list_for_each(entry, &cs4281_devs) { @@ -4344,10 +4337,8 @@ CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO - "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=0x%.8x pBA1=0x%.8x \n", - (unsigned) temp1, (unsigned) temp2, - (unsigned) s->pBA0, (unsigned) s->pBA1)); - + "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n", + (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1)); CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n", @@ -4394,15 +4385,13 @@ if (pmdev) { CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs4281: probe() pm_register() succeeded (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() succeeded (%p).\n", pmdev)); pmdev->data = s; } else { CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO - "cs4281: probe() pm_register() failed (0x%x).\n", - (unsigned)pmdev)); + "cs4281: probe() pm_register() failed (%p).\n", pmdev)); s->pm.flags |= CS4281_PM_NOT_REGISTERED; } #endif diff -Nru a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c --- a/sound/oss/cs4281/cs4281pm-24.c Thu May 30 21:28:59 2002 +++ b/sound/oss/cs4281/cs4281pm-24.c Thu May 30 21:28:59 2002 @@ -38,16 +38,16 @@ #define CS4281_SUSPEND_TBL cs4281_suspend_tbl #define CS4281_RESUME_TBL cs4281_resume_tbl */ -#define CS4281_SUSPEND_TBL cs4281_null -#define CS4281_RESUME_TBL cs4281_null +#define CS4281_SUSPEND_TBL (int (*) (struct pci_dev *, u32)) cs4281_null +#define CS4281_RESUME_TBL (int (*) (struct pci_dev *)) cs4281_null int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) { struct cs4281_state *state; CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: cs4281_pm_callback dev=0x%x rqst=0x%x state=%d\n", - (unsigned)dev,(unsigned)rqst,(unsigned)data)); + "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n", + dev,(unsigned)rqst,data)); state = (struct cs4281_state *) dev->data; if (state) { switch(rqst) { @@ -78,7 +78,7 @@ } #else /* 
CS4281_PM */ -#define CS4281_SUSPEND_TBL cs4281_null -#define CS4281_RESUME_TBL cs4281_null +#define CS4281_SUSPEND_TBL (int (*) (struct pci_dev *, u32)) cs4281_null +#define CS4281_RESUME_TBL (int (*) (struct pci_dev *)) cs4281_null #endif /* CS4281_PM */