diff -Nru a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
--- a/Documentation/DocBook/Makefile Sat Oct 25 11:45:09 2003
+++ b/Documentation/DocBook/Makefile Sat Oct 25 11:45:09 2003
@@ -12,7 +12,7 @@
deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \
writing_usb_driver.sgml scsidrivers.sgml sis900.sgml \
kernel-api.sgml journal-api.sgml lsm.sgml usb.sgml \
- gadget.sgml
+ gadget.sgml libata.sgml
###
# The build process is as follows (targets):
diff -Nru a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/Documentation/DocBook/libata.tmpl Sat Oct 25 11:45:10 2003
@@ -0,0 +1,91 @@
+
+
+
+
+ libATA Developer's Guide
+
+
+
+ Jeff
+ Garzik
+
+
+
+
+ 2003
+ Jeff Garzik
+
+
+
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+
+
+
+
+
+
+
+ Thanks
+
+ The bulk of the ATA knowledge comes thanks to long conversations with
+ Andre Hedrick (www.linux-ide.org).
+
+
+ Thanks to Alan Cox for pointing out similarities
+ between SATA and SCSI, and in general for motivation to hack on
+ libata.
+
+
+ libata's device detection
+ method, ata_pio_devchk, and in general all the early probing was
+ based on extensive study of Hale Landis's probe/reset code in his
+ ATADRVR driver (www.ata-atapi.com).
+
+
+
+
+ libata Library
+!Edrivers/scsi/libata-core.c
+!Edrivers/scsi/libata-scsi.c
+
+
+
+ libata Internals
+!Idrivers/scsi/libata-core.c
+!Idrivers/scsi/libata-scsi.c
+
+
+
+ ata_piix Internals
+!Idrivers/scsi/ata_piix.c
+
+
+
+ ata_sil Internals
+!Idrivers/scsi/sata_sil.c
+
+
+
+ ata_via Internals
+!Idrivers/scsi/sata_via.c
+
+
+
diff -Nru a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS Sat Oct 25 11:45:09 2003
+++ b/MAINTAINERS Sat Oct 25 11:45:09 2003
@@ -1753,6 +1753,12 @@
W: http://www.nsa.gov/selinux
S: Supported
+SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER
+P: Pat Gefre
+M: pfg@sgi.com
+L: linux-ia64@vger.kernel.org
+S: Supported
+
SGI VISUAL WORKSTATION 320 AND 540
P: Andrey Panin
M: pazke@donpac.ru
@@ -2301,7 +2307,7 @@
XFS FILESYSTEM
P: Silicon Graphics Inc
M: owner-xfs@oss.sgi.com
-M: lord@sgi.com
+M: nathans@sgi.com
L: linux-xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
S: Supported
diff -Nru a/Makefile b/Makefile
--- a/Makefile Sat Oct 25 11:45:09 2003
+++ b/Makefile Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -test8
+EXTRAVERSION = -test9
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff -Nru a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
--- a/arch/arm/mm/proc-xscale.S Sat Oct 25 11:45:09 2003
+++ b/arch/arm/mm/proc-xscale.S Sat Oct 25 11:45:09 2003
@@ -670,7 +670,7 @@
.type __80321_proc_info,#object
__80321_proc_info:
.long 0x69052420
- .long 0xfffffff0
+ .long 0xfffff7e0
.long 0x00000c0e
b __xscale_setup
.long cpu_arch_name
diff -Nru a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
--- a/arch/i386/kernel/acpi/boot.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/acpi/boot.c Sat Oct 25 11:45:09 2003
@@ -247,6 +247,34 @@
#endif /*CONFIG_X86_IO_APIC*/
+#ifdef CONFIG_ACPI_BUS
+/*
+ * Set specified PIC IRQ to level triggered mode.
+ *
+ * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
+ * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
+ * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0)
+ * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
+ *
+ * As the BIOS should have done this for us,
+ * print a warning if the IRQ wasn't already set to level.
+ */
+
+void acpi_pic_set_level_irq(unsigned int irq)
+{
+ unsigned char mask = 1 << (irq & 7);
+ unsigned int port = 0x4d0 + (irq >> 3);
+ unsigned char val = inb(port);
+
+ if (!(val & mask)) {
+ printk(KERN_WARNING PREFIX "IRQ %d was Edge Triggered, "
+ "setting to Level Triggered\n", irq);
+ outb(val | mask, port);
+ }
+}
+#endif /* CONFIG_ACPI_BUS */
+
+
static unsigned long __init
acpi_scan_rsdp (
diff -Nru a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
--- a/arch/i386/kernel/io_apic.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/io_apic.c Sat Oct 25 11:45:09 2003
@@ -1138,12 +1138,13 @@
return 0;
}
-int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 };
+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
+u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
static int __init assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
- BUG_ON(irq >= NR_IRQS);
+ BUG_ON(irq >= NR_IRQ_VECTORS);
if (IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
next:
diff -Nru a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
--- a/arch/i386/kernel/irq.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/irq.c Sat Oct 25 11:45:09 2003
@@ -380,7 +380,7 @@
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 1: {
- unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
+ unsigned int status = desc->status & ~IRQ_DISABLED;
desc->status = status;
if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = status | IRQ_REPLAY;
diff -Nru a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
--- a/arch/i386/kernel/microcode.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/microcode.c Sat Oct 25 11:45:09 2003
@@ -324,8 +324,8 @@
/* check extended table checksum */
if (ext_table_size) {
int ext_table_sum = 0;
- i = ext_table_size / DWSIZE;
int * ext_tablep = (((void *) newmc) + MC_HEADER_SIZE + data_size);
+ i = ext_table_size / DWSIZE;
while (i--) ext_table_sum += ext_tablep[i];
if (ext_table_sum) {
printk(KERN_WARNING "microcode: aborting, bad extended signature table checksum\n");
diff -Nru a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
--- a/arch/i386/kernel/setup.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/setup.c Sat Oct 25 11:45:09 2003
@@ -139,22 +139,23 @@
probe_extension_roms(roms);
}
-static void __init limit_regions (unsigned long long size)
+static void __init limit_regions(unsigned long long size)
{
+ unsigned long long current_addr = 0;
int i;
- unsigned long long current_size = 0;
for (i = 0; i < e820.nr_map; i++) {
if (e820.map[i].type == E820_RAM) {
- current_size += e820.map[i].size;
- if (current_size >= size) {
- e820.map[i].size -= current_size-size;
+ current_addr = e820.map[i].addr + e820.map[i].size;
+ if (current_addr >= size) {
+ e820.map[i].size -= current_addr-size;
e820.nr_map = i + 1;
return;
}
}
}
}
+
static void __init add_memory_region(unsigned long long start,
unsigned long long size, int type)
{
@@ -964,7 +965,6 @@
apm_info.bios = APM_BIOS_INFO;
ist_info = IST_INFO;
saved_videomode = VIDEO_MODE;
- printk("Video mode to be used for restore is %lx\n", saved_videomode);
if( SYS_DESC_TABLE.length != 0 ) {
MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
machine_id = SYS_DESC_TABLE.table[0];
diff -Nru a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
--- a/arch/i386/kernel/time.c Sat Oct 25 11:45:09 2003
+++ b/arch/i386/kernel/time.c Sat Oct 25 11:45:09 2003
@@ -104,6 +104,15 @@
lost = jiffies - wall_jiffies;
if (lost)
usec += lost * (1000000 / HZ);
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards.
+ */
+ if (unlikely(time_adjust < 0) && usec > tickadj)
+ usec = tickadj;
+
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry(&xtime_lock, seq));
diff -Nru a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
--- a/arch/ia64/ia32/binfmt_elf32.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/ia32/binfmt_elf32.c Sat Oct 25 11:45:09 2003
@@ -62,14 +62,13 @@
struct page *
ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int no_share)
{
- struct page *pg = ia32_shared_page[(address - vma->vm_start)/PAGE_SIZE];
-
+ struct page *pg = ia32_shared_page[smp_processor_id()];
get_page(pg);
return pg;
}
static struct vm_operations_struct ia32_shared_page_vm_ops = {
- .nopage =ia32_install_shared_page
+ .nopage = ia32_install_shared_page
};
void
@@ -78,7 +77,7 @@
struct vm_area_struct *vma;
/*
- * Map GDT and TSS below 4GB, where the processor can find them. We need to map
+ * Map GDT below 4GB, where the processor can find it. We need to map
* it with privilege level 3 because the IVE uses non-privileged accesses to these
* tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
*/
@@ -86,7 +85,7 @@
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_GDT_OFFSET;
- vma->vm_end = vma->vm_start + max(PAGE_SIZE, 2*IA32_PAGE_SIZE);
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_MAYREAD;
vma->vm_ops = &ia32_shared_page_vm_ops;
diff -Nru a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
--- a/arch/ia64/ia32/ia32_entry.S Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/ia32/ia32_entry.S Sat Oct 25 11:45:09 2003
@@ -32,7 +32,7 @@
ENTRY(ia32_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
- alloc r16=ar.pfs,2,2,4,0
+ alloc r16=ar.pfs,5,2,6,0
DO_SAVE_SWITCH_STACK
mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork
@@ -41,6 +41,8 @@
mov out3=16 // stacksize (compensates for 16-byte scratch area)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s
dep out0=0,in0,CLONE_IDLETASK_BIT,1 // out0 = clone_flags & ~CLONE_IDLETASK
+ zxt4 out4=in2 // out4 = parent_tidptr
+ zxt4 out5=in4 // out5 = child_tidptr
br.call.sptk.many rp=do_fork
.ret0: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
@@ -142,6 +144,19 @@
;;
st8 [r2]=r3 // initialize return code to -ENOSYS
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
+ // Need to reload arguments (they may be changed by the tracing process)
+ adds r2=IA64_PT_REGS_R9_OFFSET+16,sp // r2 = &pt_regs.r9
+ adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
+ ;;
+ ld4 r33=[r2],8 // r9 == ecx
+ ld4 r37=[r3],16 // r13 == ebp
+ ;;
+ ld4 r34=[r2],8 // r10 == edx
+ ld4 r36=[r3],8 // r15 == edi
+ ;;
+ ld4 r32=[r2],8 // r11 == ebx
+ ld4 r35=[r3],8 // r14 == esi
+ ;;
.ret2: br.call.sptk.few rp=b6 // do the syscall
.ia32_strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
@@ -424,13 +439,13 @@
data8 sys_ni_syscall /* 235 */
data8 sys_ni_syscall
data8 sys_ni_syscall
- data8 sys_ni_syscall
+ data8 sys_tkill
data8 sys_ni_syscall
data8 compat_sys_futex /* 240 */
data8 compat_sys_sched_setaffinity
data8 compat_sys_sched_getaffinity
- data8 sys_ni_syscall
- data8 sys_ni_syscall
+ data8 sys32_set_thread_area
+ data8 sys32_get_thread_area
data8 sys_ni_syscall /* 245 */
data8 sys_ni_syscall
data8 sys_ni_syscall
@@ -438,13 +453,13 @@
data8 sys_ni_syscall
data8 sys_ni_syscall /* 250 */
data8 sys_ni_syscall
- data8 sys_ni_syscall
+ data8 sys_exit_group
data8 sys_ni_syscall
data8 sys_epoll_create
data8 sys32_epoll_ctl /* 255 */
data8 sys32_epoll_wait
- data8 sys_ni_syscall
- data8 sys_ni_syscall
+ data8 sys_remap_file_pages
+ data8 sys_set_tid_address
data8 sys_ni_syscall
data8 sys_ni_syscall /* 260 */
data8 sys_ni_syscall
diff -Nru a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c
--- a/arch/ia64/ia32/ia32_ldt.c Sat Oct 25 11:45:10 2003
+++ b/arch/ia64/ia32/ia32_ldt.c Sat Oct 25 11:45:10 2003
@@ -82,7 +82,7 @@
static int
write_ldt (void * ptr, unsigned long bytecount, int oldmode)
{
- struct ia32_modify_ldt_ldt_s ldt_info;
+ struct ia32_user_desc ldt_info;
__u64 entry;
int ret;
diff -Nru a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
--- a/arch/ia64/ia32/ia32_signal.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/ia32/ia32_signal.c Sat Oct 25 11:45:09 2003
@@ -877,8 +877,6 @@
regs->cr_iip = IA32_SA_HANDLER(ka);
set_fs(USER_DS);
- regs->r16 = (__USER_DS << 16) | (__USER_DS); /* ES == DS, GS, FS are zero */
- regs->r17 = (__USER_DS << 16) | __USER_CS;
#if 0
regs->eflags &= ~TF_MASK;
@@ -949,9 +947,6 @@
regs->cr_iip = IA32_SA_HANDLER(ka);
set_fs(USER_DS);
-
- regs->r16 = (__USER_DS << 16) | (__USER_DS); /* ES == DS, GS, FS are zero */
- regs->r17 = (__USER_DS << 16) | __USER_CS;
#if 0
regs->eflags &= ~TF_MASK;
diff -Nru a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
--- a/arch/ia64/ia32/ia32_support.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/ia32/ia32_support.c Sat Oct 25 11:45:09 2003
@@ -23,14 +23,16 @@
#include
#include
#include
+#include
#include "ia32priv.h"
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
struct exec_domain ia32_exec_domain;
-struct page *ia32_shared_page[(2*IA32_PAGE_SIZE + PAGE_SIZE - 1)/PAGE_SIZE];
-unsigned long *ia32_gdt;
+struct page *ia32_shared_page[NR_CPUS];
+unsigned long *ia32_boot_gdt;
+unsigned long *cpu_gdt_table[NR_CPUS];
static unsigned long
load_desc (u16 selector)
@@ -43,8 +45,8 @@
table = (unsigned long *) IA32_LDT_OFFSET;
limit = IA32_LDT_ENTRIES;
} else {
- table = ia32_gdt;
- limit = IA32_PAGE_SIZE / sizeof(ia32_gdt[0]);
+ table = cpu_gdt_table[smp_processor_id()];
+ limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
}
index = selector >> IA32_SEGSEL_INDEX_SHIFT;
if (index >= limit)
@@ -66,6 +68,34 @@
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
+int
+ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
+{
+ struct desc_struct *desc;
+ struct ia32_user_desc info;
+ int idx;
+
+ if (copy_from_user(&info, (void *)(childregs->r14 & 0xffffffff), sizeof(info)))
+ return -EFAULT;
+ if (LDT_empty(&info))
+ return -EINVAL;
+
+ idx = info.entry_number;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+
+ /* XXX: can this be done in a cleaner way ? */
+ load_TLS(&child->thread, smp_processor_id());
+ ia32_load_segment_descriptors(child);
+ load_TLS(¤t->thread, smp_processor_id());
+
+ return 0;
+}
+
void
ia32_save_state (struct task_struct *t)
{
@@ -83,14 +113,13 @@
{
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = ia64_task_regs(t);
- int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
eflag = t->thread.eflag;
fsr = t->thread.fsr;
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
- tssd = load_desc(_TSS(nr)); /* TSSD */
+ tssd = load_desc(_TSS); /* TSSD */
ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
ia64_setreg(_IA64_REG_AR_FSR, fsr);
@@ -102,8 +131,10 @@
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
ia64_set_kr(IA64_KR_TSSD, tssd);
- regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
- regs->r30 = load_desc(_LDT(nr)); /* LDTD */
+ regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
+ regs->r30 = load_desc(_LDT); /* LDTD */
+ load_TLS(&t->thread, smp_processor_id());
+
put_cpu();
}
@@ -113,37 +144,43 @@
void
ia32_gdt_init (void)
{
- unsigned long *tss;
+ int cpu = smp_processor_id();
+
+ ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
+ cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
+
+ /* Copy from the boot cpu's GDT */
+ memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
+}
+
+
+/*
+ * Setup IA32 GDT and TSS
+ */
+void
+ia32_boot_gdt_init (void)
+{
unsigned long ldt_size;
- int nr;
ia32_shared_page[0] = alloc_page(GFP_KERNEL);
- ia32_gdt = page_address(ia32_shared_page[0]);
- tss = ia32_gdt + IA32_PAGE_SIZE/sizeof(ia32_gdt[0]);
-
- if (IA32_PAGE_SIZE == PAGE_SIZE) {
- ia32_shared_page[1] = alloc_page(GFP_KERNEL);
- tss = page_address(ia32_shared_page[1]);
- }
+ ia32_boot_gdt = page_address(ia32_shared_page[0]);
+ cpu_gdt_table[0] = ia32_boot_gdt;
/* CS descriptor in IA-32 (scrambled) format */
- ia32_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
- 0xb, 1, 3, 1, 1, 1, 1);
+ ia32_boot_gdt[__USER_CS >> 3]
+ = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+ 0xb, 1, 3, 1, 1, 1, 1);
/* DS descriptor in IA-32 (scrambled) format */
- ia32_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
- 0x3, 1, 3, 1, 1, 1, 1);
+ ia32_boot_gdt[__USER_DS >> 3]
+ = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+ 0x3, 1, 3, 1, 1, 1, 1);
- /* We never change the TSS and LDT descriptors, so we can share them across all CPUs. */
ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
- for (nr = 0; nr < NR_CPUS; ++nr) {
- ia32_gdt[_TSS(nr) >> IA32_SEGSEL_INDEX_SHIFT]
- = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
- 0xb, 0, 3, 1, 1, 1, 0);
- ia32_gdt[_LDT(nr) >> IA32_SEGSEL_INDEX_SHIFT]
- = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
- 0x2, 0, 3, 1, 1, 1, 0);
- }
+ ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
+ 0xb, 0, 3, 1, 1, 1, 0);
+ ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
+ 0x2, 0, 3, 1, 1, 1, 0);
}
/*
diff -Nru a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
--- a/arch/ia64/ia32/ia32priv.h Sat Oct 25 11:45:10 2003
+++ b/arch/ia64/ia32/ia32priv.h Sat Oct 25 11:45:10 2003
@@ -10,6 +10,8 @@
#include
#include
+#include
+
/*
* 32 bit structures for IA32 support.
*/
@@ -327,15 +329,23 @@
#define __USER_CS 0x23
#define __USER_DS 0x2B
-#define FIRST_TSS_ENTRY 6
-#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
-#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
-#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+/*
+ * The per-cpu GDT has 32 entries: see
+ */
+#define GDT_ENTRIES 32
+
+#define GDT_SIZE (GDT_ENTRIES * 8)
+
+#define TSS_ENTRY 14
+#define LDT_ENTRY (TSS_ENTRY + 1)
#define IA32_SEGSEL_RPL (0x3 << 0)
#define IA32_SEGSEL_TI (0x1 << 2)
#define IA32_SEGSEL_INDEX_SHIFT 3
+#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+
#define IA32_SEG_BASE 16
#define IA32_SEG_TYPE 40
#define IA32_SEG_SYS 44
@@ -419,7 +429,42 @@
#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
-struct ia32_modify_ldt_ldt_s {
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ 0x7100)
+
+#define LDT_empty(info) ( \
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 )
+
+static inline void
+load_TLS (struct thread_struct *t, unsigned int cpu)
+{
+ extern unsigned long *cpu_gdt_table[NR_CPUS];
+
+ memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
+ memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
+ memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
+}
+
+struct ia32_user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
diff -Nru a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
--- a/arch/ia64/ia32/sys_ia32.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/ia32/sys_ia32.c Sat Oct 25 11:45:09 2003
@@ -2817,6 +2817,114 @@
return numevents;
}
+/*
+ * Get a yet unused TLS descriptor index.
+ */
+static int
+get_free_idx (void)
+{
+ struct thread_struct *t = ¤t->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty(t->tls_array + idx))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int
+sys32_set_thread_area (struct ia32_user_desc *u_info)
+{
+ struct thread_struct *t = ¤t->thread;
+ struct ia32_user_desc info;
+ struct desc_struct *desc;
+ int cpu, idx;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and allocate an empty descriptor:
+ */
+ if (idx == -1) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ cpu = smp_processor_id();
+
+ if (LDT_empty(&info)) {
+ desc->a = 0;
+ desc->b = 0;
+ } else {
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+ load_TLS(t, cpu);
+ return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) ( \
+ (((desc)->a >> 16) & 0x0000ffff) | \
+ (((desc)->b << 16) & 0x00ff0000) | \
+ ( (desc)->b & 0xff000000) )
+
+#define GET_LIMIT(desc) ( \
+ ((desc)->a & 0x0ffff) | \
+ ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc) (((desc)->b >> 23) & 1)
+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
+
+asmlinkage int
+sys32_get_thread_area (struct ia32_user_desc *u_info)
+{
+ struct ia32_user_desc info;
+ struct desc_struct *desc;
+ int idx;
+
+ if (get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ info.entry_number = idx;
+ info.base_addr = GET_BASE(desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
struct ncp_mount_data32 {
diff -Nru a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
--- a/arch/ia64/kernel/efi.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/efi.c Sat Oct 25 11:45:09 2003
@@ -297,9 +297,9 @@
u64 start;
u64 end;
} prev, curr;
- void *efi_map_start, *efi_map_end, *p, *q, *r;
+ void *efi_map_start, *efi_map_end, *p, *q;
efi_memory_desc_t *md, *check_md;
- u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;
+ u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -312,41 +312,34 @@
if (!(md->attribute & EFI_MEMORY_WB))
continue;
- if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
- /*
- * Search for the next run of contiguous WB memory. Start search
- * at first granule boundary covered by md.
- */
- granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1)
- & -IA64_GRANULE_SIZE);
- first_non_wb_addr = granule_addr;
- for (q = p; q < efi_map_end; q += efi_desc_size) {
- check_md = q;
-
- if (check_md->attribute & EFI_MEMORY_WB)
- trim_bottom(check_md, granule_addr);
-
- if (check_md->phys_addr < granule_addr)
- continue;
+ /*
+ * granule_addr is the base of md's first granule.
+ * [granule_addr - first_non_wb_addr) is guaranteed to
+ * be contiguous WB memory.
+ */
+ granule_addr = md->phys_addr & ~(IA64_GRANULE_SIZE - 1);
+ first_non_wb_addr = max(first_non_wb_addr, granule_addr);
- if (!(check_md->attribute & EFI_MEMORY_WB))
- break; /* hit a non-WB region; stop search */
+ if (first_non_wb_addr < md->phys_addr) {
+ trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
+ granule_addr = md->phys_addr & ~(IA64_GRANULE_SIZE - 1);
+ first_non_wb_addr = max(first_non_wb_addr, granule_addr);
+ }
- if (check_md->phys_addr != first_non_wb_addr)
- break; /* hit a memory hole; stop search */
+ for (q = p; q < efi_map_end; q += efi_desc_size) {
+ check_md = q;
+ if ((check_md->attribute & EFI_MEMORY_WB) &&
+ (check_md->phys_addr == first_non_wb_addr))
first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
- }
- /* round it down to the previous granule-boundary: */
- first_non_wb_addr &= -IA64_GRANULE_SIZE;
-
- if (!(first_non_wb_addr > granule_addr))
- continue; /* couldn't find enough contiguous memory */
-
- for (r = p; r < q; r += efi_desc_size)
- trim_top(r, first_non_wb_addr);
+ else
+ break; /* non-WB or hole */
}
+ last_granule_addr = first_non_wb_addr & ~(IA64_GRANULE_SIZE - 1);
+ if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
+ trim_top(md, last_granule_addr);
+
if (is_available_memory(md)) {
if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
if (md->phys_addr > mem_limit)
@@ -692,8 +685,7 @@
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if ((md->phys_addr <= phys_addr) && (phys_addr <=
- (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
+ if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md->type;
}
return 0;
@@ -713,9 +705,34 @@
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if ((md->phys_addr <= phys_addr) && (phys_addr <=
- (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
+ if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md->attribute;
+ }
+ return 0;
+}
+
+int
+valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+
+ if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
+ if (!(md->attribute & EFI_MEMORY_WB))
+ return 0;
+
+ if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
+ *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
+ return 1;
+ }
}
return 0;
}
diff -Nru a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S
--- a/arch/ia64/kernel/gate-data.S Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/gate-data.S Sat Oct 25 11:45:09 2003
@@ -1,3 +1,3 @@
- .section .data.gate, "ax"
+ .section .data.gate, "aw"
.incbin "arch/ia64/kernel/gate.so"
diff -Nru a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
--- a/arch/ia64/kernel/ia64_ksyms.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/ia64_ksyms.c Sat Oct 25 11:45:09 2003
@@ -34,13 +34,8 @@
#include
EXPORT_SYMBOL(probe_irq_mask);
-#include
#include
-/* not coded yet?? EXPORT_SYMBOL(csum_ipv6_magic); */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(csum_tcpudp_magic);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
#include
EXPORT_SYMBOL(__ia64_memcpy_fromio);
@@ -58,9 +53,11 @@
EXPORT_SYMBOL(clear_page);
#ifdef CONFIG_VIRTUAL_MEM_MAP
+#include
#include
EXPORT_SYMBOL(vmalloc_end);
EXPORT_SYMBOL(ia64_pfn_valid);
+EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif
#include
diff -Nru a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
--- a/arch/ia64/kernel/irq.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/irq.c Sat Oct 25 11:45:09 2003
@@ -379,8 +379,11 @@
void disable_irq(unsigned int irq)
{
+ irq_desc_t *desc = irq_descp(irq);
+
disable_irq_nosync(irq);
- synchronize_irq(irq);
+ if (desc->action)
+ synchronize_irq(irq);
}
/**
diff -Nru a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
--- a/arch/ia64/kernel/module.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/module.c Sat Oct 25 11:45:09 2003
@@ -322,6 +322,10 @@
void
module_free (struct module *mod, void *module_region)
{
+ if (mod->arch.init_unw_table && module_region == mod->module_init) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
vfree(module_region);
}
@@ -843,28 +847,92 @@
return -ENOEXEC;
}
+/*
+ * Modules contain a single unwind table which covers both the core and the init text
+ * sections but since the two are not contiguous, we need to split this table up such that
+ * we can register (and unregister) each "segment" separately. Fortunately, this sounds
+ * more complicated than it really is.
+ */
+static void
+register_unwind_table (struct module *mod)
+{
+ struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
+ struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
+ struct unw_table_entry tmp, *e1, *e2, *core, *init;
+ unsigned long num_init = 0, num_core = 0;
+
+ /* First, count how many init and core unwind-table entries there are. */
+ for (e1 = start; e1 < end; ++e1)
+ if (in_init(mod, e1->start_offset))
+ ++num_init;
+ else
+ ++num_core;
+ /*
+ * Second, sort the table such that all unwind-table entries for the init and core
+ * text sections are nicely separated. We do this with a stupid bubble sort
+ * (unwind tables don't get ridiculously huge).
+ */
+ for (e1 = start; e1 < end; ++e1) {
+ for (e2 = e1 + 1; e2 < end; ++e2) {
+ if (e2->start_offset < e1->start_offset) {
+ tmp = *e1;
+ *e1 = *e2;
+ *e2 = tmp;
+ }
+ }
+ }
+ /*
+ * Third, locate the init and core segments in the unwind table:
+ */
+ if (in_init(mod, start->start_offset)) {
+ init = start;
+ core = start + num_init;
+ } else {
+ core = start;
+ init = start + num_core;
+ }
+
+ DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
+ mod->name, mod->arch.gp, num_init, num_core);
+
+ /*
+ * Fourth, register both tables (if not empty).
+ */
+ if (num_core > 0) {
+ mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
+ core, core + num_core);
+ DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
+ mod->arch.core_unw_table, core, core + num_core);
+ }
+ if (num_init > 0) {
+ mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
+ init, init + num_init);
+ DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
+ mod->arch.init_unw_table, init, init + num_init);
+ }
+}
+
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
if (mod->arch.unwind)
- mod->arch.unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
- (void *) mod->arch.unwind->sh_addr,
- ((void *) mod->arch.unwind->sh_addr
- + mod->arch.unwind->sh_size));
+ register_unwind_table(mod);
return 0;
}
void
module_arch_cleanup (struct module *mod)
{
- if (mod->arch.unwind)
- unw_remove_unwind_table(mod->arch.unw_table);
+ if (mod->arch.init_unw_table)
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ if (mod->arch.core_unw_table)
+ unw_remove_unwind_table(mod->arch.core_unw_table);
}
#ifdef CONFIG_SMP
void
-percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
+percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
for (i = 0; i < NR_CPUS; i++)
diff -Nru a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
--- a/arch/ia64/kernel/perfmon.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/perfmon.c Sat Oct 25 11:45:09 2003
@@ -202,8 +202,8 @@
#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
-#define LOCK_PFS() spin_lock(&pfm_sessions.pfs_lock)
-#define UNLOCK_PFS() spin_unlock(&pfm_sessions.pfs_lock)
+#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
+#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
@@ -618,6 +618,7 @@
.get_sb = pfmfs_get_sb,
.kill_sb = kill_anon_super,
};
+
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
@@ -634,6 +635,8 @@
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif
+void dump_pmu_state(const char *);
+
/*
* the HP simulator must be first because
* CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM
@@ -1283,10 +1286,11 @@
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
+ unsigned long flags;
/*
* validy checks on cpu_mask have been done upstream
*/
- LOCK_PFS();
+ LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
@@ -1325,7 +1329,7 @@
is_syswide,
cpu));
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
return 0;
@@ -1334,7 +1338,7 @@
pfm_sessions.pfs_sys_session[cpu]->pid,
smp_processor_id()));
abort:
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
return -EBUSY;
@@ -1343,11 +1347,11 @@
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
-
+ unsigned long flags;
/*
* validy checks on cpu_mask have been done upstream
*/
- LOCK_PFS();
+ LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
@@ -1380,7 +1384,7 @@
is_syswide,
cpu));
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
return 0;
}
@@ -1655,7 +1659,7 @@
}
/*
- * context is locked when coming here
+ * context is locked when coming here and interrupts are disabled
*/
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
@@ -1789,6 +1793,7 @@
* even if the task itself is in the middle of being ctxsw out.
*/
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+
static int
pfm_close(struct inode *inode, struct file *filp)
{
@@ -1803,10 +1808,6 @@
int free_possible = 1;
int state, is_system;
- { u64 psr = pfm_get_psr();
- BUG_ON((psr & IA64_PSR_I) == 0UL);
- }
-
DPRINT(("pfm_close called private=%p\n", filp->private_data));
if (!inode) {
@@ -1815,7 +1816,7 @@
}
if (PFM_IS_FILE(filp) == 0) {
- printk(KERN_ERR "perfmon: pfm_close: bad magic [%d]\n", current->pid);
+ DPRINT(("bad magic for [%d]\n", current->pid));
return -EBADF;
}
@@ -1824,6 +1825,23 @@
printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
return -EBADF;
}
+ /*
+ * remove our file from the async queue, if we use this mode.
+ * This can be done without the context being protected. We come
+	 * here when the context has become unreachable by other tasks.
+ *
+ * We may still have active monitoring at this point and we may
+ * end up in pfm_overflow_handler(). However, fasync_helper()
+ * operates with interrupts disabled and it cleans up the
+ * queue. If the PMU handler is called prior to entering
+ * fasync_helper() then it will send a signal. If it is
+ * invoked after, it will find an empty queue and no
+	 * signal will be sent. In both cases, we are safe
+ */
+ if (filp->f_flags & FASYNC) {
+ DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+ pfm_do_fasync (-1, filp, ctx, 0);
+ }
PROTECT_CTX(ctx, flags);
@@ -1832,24 +1850,17 @@
task = PFM_CTX_TASK(ctx);
- /*
- * remove our file from the async queue, if we use it
- */
- if (filp->f_flags & FASYNC) {
- DPRINT(("[%d] before async_queue=%p\n", current->pid, ctx->ctx_async_queue));
- pfm_do_fasync (-1, filp, ctx, 0);
- DPRINT(("[%d] after async_queue=%p\n", current->pid, ctx->ctx_async_queue));
- }
+ regs = ia64_task_regs(task);
- DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
+ DPRINT(("[%d] ctx_state=%d is_current=%d\n",
+ current->pid, state,
+ task == current ? 1 : 0));
if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
goto doit;
}
- regs = ia64_task_regs(task);
-
/*
* context still loaded/masked and self monitoring,
* we stop/unload and we destroy right here
@@ -1898,12 +1909,11 @@
ctx->ctx_state = PFM_CTX_TERMINATED;
- DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
+ DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
}
goto doit;
}
-
/*
* The task is currently blocked or will block after an overflow.
* we must force it to wakeup to get out of the
@@ -3482,6 +3492,7 @@
pfm_use_debug_registers(struct task_struct *task)
{
pfm_context_t *ctx = task->thread.pfm_context;
+ unsigned long flags;
int ret = 0;
if (pmu_conf.use_rr_dbregs == 0) return 0;
@@ -3503,7 +3514,7 @@
*/
if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
- LOCK_PFS();
+ LOCK_PFS(flags);
/*
* We cannot allow setting breakpoints when system wide monitoring
@@ -3519,7 +3530,7 @@
pfm_sessions.pfs_sys_use_dbregs,
task->pid, ret));
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
return ret;
}
@@ -3535,11 +3546,12 @@
int
pfm_release_debug_registers(struct task_struct *task)
{
+ unsigned long flags;
int ret;
if (pmu_conf.use_rr_dbregs == 0) return 0;
- LOCK_PFS();
+ LOCK_PFS(flags);
if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
ret = -1;
@@ -3547,7 +3559,7 @@
pfm_sessions.pfs_ptrace_use_dbregs--;
ret = 0;
}
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
return ret;
}
@@ -3723,7 +3735,6 @@
memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
}
-
return 0;
}
@@ -3735,6 +3746,7 @@
{
struct thread_struct *thread = NULL;
pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
+ unsigned long flags;
dbreg_t dbreg;
unsigned int rnum;
int first_time;
@@ -3793,7 +3805,7 @@
* written after the context is loaded
*/
if (is_loaded) {
- LOCK_PFS();
+ LOCK_PFS(flags);
if (first_time && is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs)
@@ -3801,7 +3813,7 @@
else
pfm_sessions.pfs_sys_use_dbregs++;
}
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
}
if (ret != 0) return ret;
@@ -3902,11 +3914,11 @@
* in case it was our first attempt, we undo the global modifications
*/
if (first_time) {
- LOCK_PFS();
+ LOCK_PFS(flags);
if (ctx->ctx_fl_system) {
pfm_sessions.pfs_sys_use_dbregs--;
}
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
ctx->ctx_fl_using_dbreg = 0;
}
/*
@@ -3959,7 +3971,11 @@
DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
return -EBUSY;
}
-
+ DPRINT(("current [%d] task [%d] ctx_state=%d is_system=%d\n",
+ current->pid,
+ PFM_CTX_TASK(ctx)->pid,
+ state,
+ is_system));
/*
* in system mode, we need to update the PMU directly
* and the user level state of the caller, which may not
@@ -4157,6 +4173,7 @@
struct task_struct *task;
struct thread_struct *thread;
struct pfm_context_t *old;
+ unsigned long flags;
#ifndef CONFIG_SMP
struct task_struct *owner_task = NULL;
#endif
@@ -4217,7 +4234,7 @@
DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
goto error;
}
- LOCK_PFS();
+ LOCK_PFS(flags);
if (is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs) {
@@ -4225,12 +4242,12 @@
ret = -EBUSY;
} else {
pfm_sessions.pfs_sys_use_dbregs++;
- DPRINT(("load [%d] increased sys_use_dbreg=%lu\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+ DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
set_dbregs = 1;
}
}
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
if (ret) goto error;
}
@@ -4377,9 +4394,9 @@
* we must undo the dbregs setting (for system-wide)
*/
if (ret && set_dbregs) {
- LOCK_PFS();
+ LOCK_PFS(flags);
pfm_sessions.pfs_sys_use_dbregs--;
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
}
/*
* release task, there is now a link with the context
@@ -4605,11 +4622,14 @@
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
break;
}
+ UNPROTECT_CTX(ctx, flags);
+
{ u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(GET_PMU_OWNER());
+ BUG_ON(ia64_psr(regs)->up);
+ BUG_ON(ia64_psr(regs)->pp);
}
- UNPROTECT_CTX(ctx, flags);
/*
* All memory free operations (especially for vmalloc'ed memory)
@@ -5488,7 +5508,7 @@
char *p = page;
struct list_head * pos;
pfm_buffer_fmt_t * entry;
- unsigned long psr;
+ unsigned long psr, flags;
int online_cpus = 0;
int i;
@@ -5528,7 +5548,7 @@
}
}
- LOCK_PFS();
+ LOCK_PFS(flags);
p += sprintf(p, "proc_sessions : %u\n"
"sys_sessions : %u\n"
"sys_use_dbregs : %u\n"
@@ -5537,7 +5557,7 @@
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_sys_use_dbregs,
pfm_sessions.pfs_ptrace_use_dbregs);
- UNLOCK_PFS();
+ UNLOCK_PFS(flags);
spin_lock(&pfm_buffer_fmt_lock);
@@ -5712,10 +5732,6 @@
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
- { u64 foo = pfm_get_psr();
- BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
- }
-
/*
* release ownership of this PMU.
* PM interrupts are masked, so nothing
@@ -5771,6 +5787,8 @@
*/
psr = pfm_get_psr();
+	BUG_ON(psr & (IA64_PSR_I));
+
/*
* stop monitoring:
* This is the last instruction which may generate an overflow
@@ -5785,12 +5803,6 @@
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-#if 1
- { u64 foo = pfm_get_psr();
- BUG_ON(foo & (IA64_PSR_I));
- BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
- }
-#endif
return;
save_error:
printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
@@ -5805,11 +5817,9 @@
struct thread_struct *t;
unsigned long flags;
-#if 1
- { u64 foo = pfm_get_psr();
- BUG_ON(foo & IA64_PSR_UP);
+ { u64 psr = pfm_get_psr();
+ BUG_ON(psr & IA64_PSR_UP);
}
-#endif
ctx = PFM_GET_CTX(task);
t = &task->thread;
@@ -5851,7 +5861,7 @@
/*
* unfreeze PMU if had pending overflows
*/
- if (t->pmcs[0] & ~1UL) pfm_unfreeze_pmu();
+ if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
/*
* now get can unmask PMU interrupts, they will
@@ -5900,10 +5910,8 @@
flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr();
-#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
-#endif
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
struct pt_regs *regs = ia64_task_regs(task);
@@ -6060,10 +6068,8 @@
t = &task->thread;
psr = pfm_get_psr();
-#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
-#endif
/*
* we restore ALL the debug registers to avoid picking up
@@ -6218,7 +6224,7 @@
/*
* clear whatever overflow status bits there were
*/
- task->thread.pmcs[0] &= ~0x1;
+ task->thread.pmcs[0] = 0;
}
ovfl_val = pmu_conf.ovfl_val;
/*
@@ -6400,6 +6406,11 @@
pfm_clear_psr_pp();
pfm_clear_psr_up();
+ /*
+ * we run with the PMU not frozen at all times
+ */
+ pfm_unfreeze_pmu();
+
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
@@ -6427,49 +6438,75 @@
if (PMD_IS_IMPL(i) == 0) continue;
ia64_set_pmd(i, 0UL);
}
-
- /*
- * we run with the PMU not frozen at all times
- */
- pfm_unfreeze_pmu();
}
/*
* used for debug purposes only
*/
void
-dump_pmu_state(void)
+dump_pmu_state(const char *from)
{
struct task_struct *task;
struct thread_struct *t;
+ struct pt_regs *regs;
pfm_context_t *ctx;
- unsigned long psr;
- int i;
+ unsigned long psr, dcr, info, flags;
+ int i, this_cpu;
+
+ local_irq_save(flags);
- printk("current [%d] %s\n", current->pid, current->comm);
+ this_cpu = smp_processor_id();
+ regs = ia64_task_regs(current);
+ info = PFM_CPUINFO_GET();
+ dcr = ia64_getreg(_IA64_REG_CR_DCR);
+
+ if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
+ this_cpu,
+ from,
+ current->pid,
+ regs->cr_iip,
+ current->comm);
task = GET_PMU_OWNER();
ctx = GET_PMU_CTX();
- printk("owner [%d] ctx=%p\n", task ? task->pid : -1, ctx);
+ printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
psr = pfm_get_psr();
- printk("psr.pp=%ld psr.up=%ld\n", (psr >> IA64_PSR_PP_BIT) &0x1UL, (psr >> IA64_PSR_PP_BIT)&0x1UL);
+ printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
+ this_cpu,
+ ia64_get_pmc(0),
+ psr & IA64_PSR_PP ? 1 : 0,
+ psr & IA64_PSR_UP ? 1 : 0,
+ dcr & IA64_DCR_PP ? 1 : 0,
+ info,
+ ia64_psr(regs)->up,
+ ia64_psr(regs)->pp);
+
+ ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->pp = 0;
t = ¤t->thread;
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
- printk("pmc[%d]=0x%lx tpmc=0x%lx\n", i, ia64_get_pmc(i), t->pmcs[i]);
+ printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
}
for (i=1; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
- printk("pmd[%d]=0x%lx tpmd=0x%lx\n", i, ia64_get_pmd(i), t->pmds[i]);
+ printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
}
+
if (ctx) {
- printk("ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
+ printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
+ this_cpu,
ctx->ctx_state,
ctx->ctx_smpl_vaddr,
ctx->ctx_smpl_hdr,
@@ -6477,6 +6514,7 @@
ctx->ctx_msgq_tail,
ctx->ctx_saved_psr_up);
}
+ local_irq_restore(flags);
}
/*
@@ -6499,10 +6537,8 @@
PFM_SET_WORK_PENDING(task, 0);
/*
- * restore default psr settings
+ * the psr bits are already set properly in copy_threads()
*/
- ia64_psr(regs)->pp = ia64_psr(regs)->up = 0;
- ia64_psr(regs)->sp = 1;
}
#else /* !CONFIG_PERFMON */
asmlinkage long
diff -Nru a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
--- a/arch/ia64/kernel/perfmon_default_smpl.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/perfmon_default_smpl.c Sat Oct 25 11:45:09 2003
@@ -93,17 +93,17 @@
hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
hdr->hdr_buf_size = arg->buf_size;
- hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
- hdr->hdr_last_pos = (void *)((unsigned long)buf)+arg->buf_size;
+ hdr->hdr_cur_offs = sizeof(*hdr);
hdr->hdr_overflows = 0UL;
hdr->hdr_count = 0UL;
- DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u\n",
+ DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
task->pid,
buf,
hdr->hdr_buf_size,
sizeof(*hdr),
- hdr->hdr_version));
+ hdr->hdr_version,
+ hdr->hdr_cur_offs));
return 0;
}
@@ -125,8 +125,8 @@
}
hdr = (pfm_default_smpl_hdr_t *)buf;
- cur = hdr->hdr_cur_pos;
- last = hdr->hdr_last_pos;
+ cur = buf+hdr->hdr_cur_offs;
+ last = buf+hdr->hdr_buf_size;
ovfl_pmd = arg->ovfl_pmd;
ovfl_notify = arg->ovfl_notify;
@@ -191,7 +191,7 @@
/*
* update position for next entry
*/
- hdr->hdr_cur_pos = cur + sizeof(*ent) + (npmds << 3);
+ hdr->hdr_cur_offs += sizeof(*ent) + (npmds << 3);
/*
* keep same ovfl_pmds, ovfl_notify
@@ -212,10 +212,9 @@
hdr->hdr_overflows++;
/*
- * if no notification is needed, then we saturate the buffer
+ * if no notification requested, then we saturate the buffer
*/
if (ovfl_notify == 0) {
- hdr->hdr_count = 0UL;
arg->ovfl_ctrl.bits.notify_user = 0;
arg->ovfl_ctrl.bits.block_task = 0;
arg->ovfl_ctrl.bits.mask_monitoring = 1;
@@ -236,8 +235,8 @@
hdr = (pfm_default_smpl_hdr_t *)buf;
- hdr->hdr_count = 0UL;
- hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
+ hdr->hdr_count = 0UL;
+ hdr->hdr_cur_offs = sizeof(*hdr);
ctrl->bits.mask_monitoring = 0;
ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
diff -Nru a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
--- a/arch/ia64/kernel/process.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/process.c Sat Oct 25 11:45:09 2003
@@ -25,6 +25,7 @@
#include
#include
+#include
#include
#include
#include
@@ -324,7 +325,7 @@
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (user_mode(child_ptregs)) {
- if (clone_flags & CLONE_SETTLS)
+ if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
@@ -352,9 +353,13 @@
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
- /* stop some PSR bits from being inherited: */
+ /* stop some PSR bits from being inherited.
+ * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+ * therefore we must specify them explicitly here and not include them in
+ * IA64_PSR_BITS_TO_CLEAR.
+ */
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
- & ~IA64_PSR_BITS_TO_CLEAR);
+ & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
/*
* NOTE: The calling convention considers all floating point
@@ -383,8 +388,11 @@
* If we're cloning an IA32 task then save the IA32 extra
* state from the current task to the new task
*/
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ if (IS_IA32_PROCESS(ia64_task_regs(current))) {
ia32_save_state(p);
+ if (clone_flags & CLONE_SETTLS)
+ retval = ia32_clone_tls(p, child_ptregs);
+ }
#endif
#ifdef CONFIG_PERFMON
@@ -685,12 +693,16 @@
(*efi.reset_system)(EFI_RESET_WARM, 0, 0, 0);
}
+EXPORT_SYMBOL(machine_restart);
+
void
machine_halt (void)
{
cpu_halt();
}
+EXPORT_SYMBOL(machine_halt);
+
void
machine_power_off (void)
{
@@ -698,3 +710,5 @@
pm_power_off();
machine_halt();
}
+
+EXPORT_SYMBOL(machine_power_off);
diff -Nru a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
--- a/arch/ia64/kernel/setup.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/setup.c Sat Oct 25 11:45:09 2003
@@ -327,9 +327,11 @@
* because we don't *really* know whether there's anything there, but we hope that
* all new boxes will implement HCDP.
*/
- extern unsigned char acpi_legacy_devices;
- if (!efi.hcdp && acpi_legacy_devices)
- setup_serial_legacy();
+ {
+ extern unsigned char acpi_legacy_devices;
+ if (!efi.hcdp && acpi_legacy_devices)
+ setup_serial_legacy();
+ }
#endif
#ifdef CONFIG_VT
diff -Nru a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
--- a/arch/ia64/kernel/smpboot.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/smpboot.c Sat Oct 25 11:45:09 2003
@@ -35,6 +35,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -312,6 +313,9 @@
local_irq_enable();
calibrate_delay();
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+#ifdef CONFIG_IA32_SUPPORT
+ ia32_gdt_init();
+#endif
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
/*
diff -Nru a/arch/ia64/kernel/unwind_i.h b/arch/ia64/kernel/unwind_i.h
--- a/arch/ia64/kernel/unwind_i.h Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/kernel/unwind_i.h Sat Oct 25 11:45:09 2003
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2002 Hewlett-Packard Co
+ * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang
*
* Kernel unwind support.
@@ -43,12 +43,6 @@
u64 header;
u64 desc[0]; /* unwind descriptors */
/* personality routine and language-specific data follow behind descriptors */
-};
-
-struct unw_table_entry {
- u64 start_offset;
- u64 end_offset;
- u64 info_offset;
};
struct unw_table {
diff -Nru a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
--- a/arch/ia64/lib/checksum.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/lib/checksum.c Sat Oct 25 11:45:09 2003
@@ -1,8 +1,8 @@
/*
* Network checksum routines
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ * Stephane Eranian
*
* Most of the code coming from arch/alpha/lib/checksum.c
*
@@ -10,6 +10,7 @@
* in an architecture-specific manner due to speed..
*/
+#include
#include
#include
@@ -40,6 +41,8 @@
((unsigned long) proto << 8));
}
+EXPORT_SYMBOL(csum_tcpudp_magic);
+
unsigned int
csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len,
unsigned short proto, unsigned int sum)
@@ -84,6 +87,7 @@
return result;
}
+EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly
@@ -94,3 +98,5 @@
{
return ~do_csum(buff,len);
}
+
+EXPORT_SYMBOL(ip_compute_csum);
diff -Nru a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
--- a/arch/ia64/lib/csum_partial_copy.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/lib/csum_partial_copy.c Sat Oct 25 11:45:09 2003
@@ -1,12 +1,13 @@
/*
* Network Checksum & Copy routine
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ * Stephane Eranian
*
* Most of the code has been imported from Linux/Alpha
*/
+#include
#include
#include
@@ -146,3 +147,4 @@
return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff -Nru a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
--- a/arch/ia64/mm/discontig.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/mm/discontig.c Sat Oct 25 11:45:09 2003
@@ -186,7 +186,7 @@
*/
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (node == node_cpuid[cpu].nid) {
- memcpy(cpu_data, __phys_per_cpu_start,
+ memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end-__per_cpu_start);
__per_cpu_offset[cpu] =
(char*)__va(cpu_data) -
diff -Nru a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
--- a/arch/ia64/mm/init.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/mm/init.c Sat Oct 25 11:45:09 2003
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -43,6 +44,8 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
struct page *vmem_map;
+
+ EXPORT_SYMBOL(vmem_map);
#endif
static int pgt_cache_water[2] = { 25, 50 };
@@ -555,6 +558,6 @@
setup_gate(); /* setup gate pages before we free up boot memory... */
#ifdef CONFIG_IA32_SUPPORT
- ia32_gdt_init();
+ ia32_boot_gdt_init();
#endif
}
diff -Nru a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
--- a/arch/ia64/mm/numa.c Sat Oct 25 11:45:09 2003
+++ b/arch/ia64/mm/numa.c Sat Oct 25 11:45:09 2003
@@ -11,12 +11,19 @@
*/
#include
+#include
#include
+#include
#include
+#include
#include
#include
#include
+static struct memblk *sysfs_memblks;
+static struct node *sysfs_nodes;
+static struct cpu *sysfs_cpus;
+
/*
* The following structures are usually initialized by ACPI or
* similar mechanisms and describe the NUMA characteristics of the machine.
@@ -43,3 +50,52 @@
return (i < num_memblks) ? node_memblk[i].nid : (num_memblks ? -1 : 0);
}
+
+static int __init topology_init(void)
+{
+ int i, err = 0;
+
+ sysfs_nodes = kmalloc(sizeof(struct node) * numnodes, GFP_KERNEL);
+ if (!sysfs_nodes) {
+ err = -ENOMEM;
+ goto out;
+ }
+ memset(sysfs_nodes, 0, sizeof(struct node) * numnodes);
+
+ sysfs_memblks = kmalloc(sizeof(struct memblk) * num_memblks,
+ GFP_KERNEL);
+ if (!sysfs_memblks) {
+ kfree(sysfs_nodes);
+ err = -ENOMEM;
+ goto out;
+ }
+ memset(sysfs_memblks, 0, sizeof(struct memblk) * num_memblks);
+
+ sysfs_cpus = kmalloc(sizeof(struct cpu) * NR_CPUS, GFP_KERNEL);
+ if (!sysfs_cpus) {
+ kfree(sysfs_memblks);
+ kfree(sysfs_nodes);
+ err = -ENOMEM;
+ goto out;
+ }
+ memset(sysfs_cpus, 0, sizeof(struct cpu) * NR_CPUS);
+
+ for (i = 0; i < numnodes; i++)
+ if ((err = register_node(&sysfs_nodes[i], i, 0)))
+ goto out;
+
+ for (i = 0; i < num_memblks; i++)
+ if ((err = register_memblk(&sysfs_memblks[i], i,
+ &sysfs_nodes[memblk_to_node(i)])))
+ goto out;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i))
+ if((err = register_cpu(&sysfs_cpus[i], i,
+ &sysfs_nodes[cpu_to_node(i)])))
+ goto out;
+ out:
+ return err;
+}
+
+__initcall(topology_init);
diff -Nru a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
--- a/arch/sparc/kernel/pcic.c Sat Oct 25 11:45:09 2003
+++ b/arch/sparc/kernel/pcic.c Sat Oct 25 11:45:09 2003
@@ -785,6 +785,15 @@
if (lost)
usec += lost * (1000000 / HZ);
}
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0) && usec > tickadj)
+ usec = tickadj;
+
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
diff -Nru a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
--- a/arch/sparc/kernel/time.c Sat Oct 25 11:45:09 2003
+++ b/arch/sparc/kernel/time.c Sat Oct 25 11:45:09 2003
@@ -490,6 +490,15 @@
if (lost)
usec += lost * (1000000 / HZ);
}
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0) && usec > tickadj)
+ usec = tickadj;
+
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
diff -Nru a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
--- a/arch/sparc64/Kconfig Sat Oct 25 11:45:09 2003
+++ b/arch/sparc64/Kconfig Sat Oct 25 11:45:09 2003
@@ -211,6 +211,22 @@
bool
default y
+choice
+ prompt "SPARC64 Huge TLB Page Size"
+ depends on HUGETLB_PAGE
+ default HUGETLB_PAGE_SIZE_4MB
+
+config HUGETLB_PAGE_SIZE_4MB
+ bool "4MB"
+
+config HUGETLB_PAGE_SIZE_512K
+ bool "512K"
+
+config HUGETLB_PAGE_SIZE_64K
+ bool "64K"
+
+endchoice
+
config GENERIC_ISA_DMA
bool
default y
diff -Nru a/arch/sparc64/defconfig b/arch/sparc64/defconfig
--- a/arch/sparc64/defconfig Sat Oct 25 11:45:09 2003
+++ b/arch/sparc64/defconfig Sat Oct 25 11:45:09 2003
@@ -62,6 +62,9 @@
CONFIG_SPARC64=y
CONFIG_HOTPLUG=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_HUGETLB_PAGE_SIZE_4MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
CONFIG_GENERIC_ISA_DMA=y
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
@@ -315,6 +318,11 @@
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_MEGARAID is not set
+CONFIG_SCSI_SATA=y
+CONFIG_SCSI_SATA_SVW=m
+CONFIG_SCSI_ATA_PIIX=m
+CONFIG_SCSI_SATA_PROMISE=m
+CONFIG_SCSI_SATA_VIA=m
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_CPQFCTS is not set
CONFIG_SCSI_DMX3191D=m
@@ -713,7 +721,6 @@
CONFIG_EPIC100=m
CONFIG_SUNDANCE=m
CONFIG_SUNDANCE_MMIO=y
-# CONFIG_TLAN is not set
CONFIG_VIA_RHINE=m
# CONFIG_VIA_RHINE_MMIO is not set
@@ -784,7 +791,6 @@
#
# CONFIG_TR is not set
CONFIG_NET_FC=y
-# CONFIG_RCPCI is not set
CONFIG_SHAPER=m
#
@@ -1115,8 +1121,8 @@
CONFIG_DEVPTS_FS_XATTR=y
# CONFIG_DEVPTS_FS_SECURITY is not set
# CONFIG_TMPFS is not set
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
#
diff -Nru a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
--- a/arch/sparc64/kernel/time.c Sat Oct 25 11:45:09 2003
+++ b/arch/sparc64/kernel/time.c Sat Oct 25 11:45:09 2003
@@ -1149,6 +1149,15 @@
if (lost)
usec += lost * (1000000 / HZ);
}
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0) && usec > tickadj)
+ usec = tickadj;
+
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
diff -Nru a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
--- a/arch/sparc64/mm/hugetlbpage.c Sat Oct 25 11:45:09 2003
+++ b/arch/sparc64/mm/hugetlbpage.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* SPARC64 Huge TLB page support.
*
- * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
*/
#include
@@ -12,6 +12,7 @@
#include
#include
#include
+#include
#include
#include
@@ -19,87 +20,68 @@
#include
#include
-static struct vm_operations_struct hugetlb_vm_ops;
-struct list_head htlbpage_freelist;
-spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
-extern long htlbpagemem;
-
-static void zap_hugetlb_resources(struct vm_area_struct *);
-void free_huge_page(struct page *page);
-
-#define MAX_ID 32
-struct htlbpagekey {
- struct inode *in;
- int key;
-} htlbpagek[MAX_ID];
+static long htlbpagemem;
+int htlbpage_max;
+static long htlbzone_pages;
-static struct inode *find_key_inode(int key)
+static struct list_head hugepage_freelists[MAX_NUMNODES];
+static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
+
+static void enqueue_huge_page(struct page *page)
{
- int i;
+ list_add(&page->list,
+ &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+}
- for (i = 0; i < MAX_ID; i++) {
- if (htlbpagek[i].key == key)
- return htlbpagek[i].in;
+static struct page *dequeue_huge_page(void)
+{
+ int nid = numa_node_id();
+ struct page *page = NULL;
+
+ if (list_empty(&hugepage_freelists[nid])) {
+ for (nid = 0; nid < MAX_NUMNODES; ++nid)
+ if (!list_empty(&hugepage_freelists[nid]))
+ break;
}
- return NULL;
+ if (nid >= 0 && nid < MAX_NUMNODES &&
+ !list_empty(&hugepage_freelists[nid])) {
+ page = list_entry(hugepage_freelists[nid].next,
+ struct page, list);
+ list_del(&page->list);
+ }
+ return page;
}
-static struct page *alloc_hugetlb_page(void)
+static struct page *alloc_fresh_huge_page(void)
{
- struct list_head *curr, *head;
+ static int nid = 0;
struct page *page;
+ page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
+ nid = (nid + 1) % numnodes;
+ return page;
+}
- spin_lock(&htlbpage_lock);
+static void free_huge_page(struct page *page);
- head = &htlbpage_freelist;
- curr = head->next;
+static struct page *alloc_hugetlb_page(void)
+{
+ struct page *page;
- if (curr == head) {
+ spin_lock(&htlbpage_lock);
+ page = dequeue_huge_page();
+ if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
- page = list_entry(curr, struct page, list);
- list_del(curr);
htlbpagemem--;
-
spin_unlock(&htlbpage_lock);
-
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
memset(page_address(page), 0, HPAGE_SIZE);
-
return page;
}
-static void free_hugetlb_page(struct page *page)
-{
- spin_lock(&htlbpage_lock);
- if ((page->mapping != NULL) && (page_count(page) == 2)) {
- struct inode *inode = page->mapping->host;
- int i;
-
- ClearPageDirty(page);
- remove_from_page_cache(page);
- set_page_count(page, 1);
- if ((inode->i_size -= HPAGE_SIZE) == 0) {
- for (i = 0; i < MAX_ID; i++) {
- if (htlbpagek[i].key == inode->i_ino) {
- htlbpagek[i].key = 0;
- htlbpagek[i].in = NULL;
- break;
- }
- }
- kfree(inode);
- }
- }
- if (put_page_testzero(page)) {
- list_add(&page->list, &htlbpage_freelist);
- htlbpagemem++;
- }
- spin_unlock(&htlbpage_lock);
-}
-
-static pte_t *huge_pte_alloc_map(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pmd_t *pmd;
@@ -114,7 +96,7 @@
return pte;
}
-static pte_t *huge_pte_offset_map(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pmd_t *pmd;
@@ -129,109 +111,30 @@
return pte;
}
-static pte_t *huge_pte_offset_map_nested(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd) {
- pmd = pmd_offset(pgd, addr);
- if (pmd)
- pte = pte_offset_map_nested(pmd, addr);
- }
- return pte;
-}
-
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZ4MB; } while (0)
+#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, pte_t * page_table, int write_access)
{
- pte_t entry;
unsigned long i;
+ pte_t entry;
mm->rss += (HPAGE_SIZE / PAGE_SIZE);
+ if (write_access)
+ entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
+ vma->vm_page_prot)));
+ else
+ entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+ entry = pte_mkyoung(entry);
+ mk_pte_huge(entry);
+
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- if (write_access)
- entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
- vma->vm_page_prot)));
- else
- entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
- entry = pte_mkyoung(entry);
- mk_pte_huge(entry);
- pte_val(entry) += (i << PAGE_SHIFT);
set_pte(page_table, entry);
page_table++;
- }
-}
-
-static int anon_get_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
- int write_access, pte_t * page_table)
-{
- struct page *page;
-
- page = alloc_hugetlb_page();
- if (page == NULL)
- return -1;
- set_huge_pte(mm, vma, page, page_table, write_access);
- return 1;
-}
-
-static int
-make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- pte_t *pte;
- int write;
-
- vma = find_vma(mm, addr);
- if (!vma)
- goto out_error1;
-
- write = (vma->vm_flags & VM_WRITE) != 0;
- if ((vma->vm_end - vma->vm_start) & (HPAGE_SIZE - 1))
- goto out_error1;
-
- spin_lock(&mm->page_table_lock);
- do {
- int err;
- pte = huge_pte_alloc_map(mm, addr);
- err = (!pte ||
- !pte_none(*pte) ||
- (anon_get_hugetlb_page(mm, vma,
- write ? VM_WRITE : VM_READ,
- pte) == -1));
- if (pte)
- pte_unmap(pte);
- if (err)
- goto out_error;
-
- addr += HPAGE_SIZE;
- } while (addr < end);
- spin_unlock(&mm->page_table_lock);
-
- vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
- if (flags & MAP_PRIVATE)
- vma->vm_flags |= VM_DONTCOPY;
- vma->vm_ops = &hugetlb_vm_ops;
- return 0;
-
-out_error:
- if (addr > vma->vm_start) {
- vma->vm_end = addr;
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
- zap_hugetlb_resources(vma);
- flush_tlb_range(vma, vma->vm_start, vma->vm_end);
- vma->vm_end = end;
+ pte_val(entry) += PAGE_SIZE;
}
- spin_unlock(&mm->page_table_lock);
- out_error1:
- return -1;
}
/*
@@ -253,18 +156,15 @@
struct page *ptepage;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
+ int i;
while (addr < end) {
- unsigned long i;
-
- dst_pte = huge_pte_alloc_map(dst, addr);
+ dst_pte = huge_pte_alloc(dst, addr);
if (!dst_pte)
goto nomem;
-
- src_pte = huge_pte_offset_map_nested(src, addr);
+ src_pte = huge_pte_offset(src, addr);
+ BUG_ON(!src_pte || pte_none(*src_pte));
entry = *src_pte;
- pte_unmap_nested(src_pte);
-
ptepage = pte_page(entry);
get_page(ptepage);
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
@@ -272,8 +172,6 @@
pte_val(entry) += PAGE_SIZE;
dst_pte++;
}
- pte_unmap(dst_pte - (1 << HUGETLB_PAGE_ORDER));
-
dst->rss += (HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
@@ -285,161 +183,333 @@
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
- unsigned long *st, int *length, int i)
+ unsigned long *position, int *length, int i)
{
- pte_t *ptep, pte;
- unsigned long start = *st;
- unsigned long pstart;
- int len = *length;
- struct page *page;
+ unsigned long vaddr = *position;
+ int remainder = *length;
- do {
- pstart = start;
- ptep = huge_pte_offset_map(mm, start);
- pte = *ptep;
+ WARN_ON(!is_vm_hugetlb_page(vma));
-back1:
- page = pte_page(pte);
+ while (vaddr < vma->vm_end && remainder) {
if (pages) {
- page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
+ pte_t *pte;
+ struct page *page;
+
+ pte = huge_pte_offset(mm, vaddr);
+
+ /* hugetlb should be locked, and hence, prefaulted */
+ BUG_ON(!pte || pte_none(*pte));
+
+ page = pte_page(*pte);
+
+ WARN_ON(!PageCompound(page));
+
get_page(page);
pages[i] = page;
}
+
if (vmas)
vmas[i] = vma;
- i++;
- len--;
- start += PAGE_SIZE;
- if (((start & HPAGE_MASK) == pstart) && len &&
- (start < vma->vm_end))
- goto back1;
- pte_unmap(ptep);
- } while (len && start < vma->vm_end);
+ vaddr += PAGE_SIZE;
+ --remainder;
+ ++i;
+ }
+
+ *length = remainder;
+ *position = vaddr;
- *length = len;
- *st = start;
return i;
}
-static void zap_hugetlb_resources(struct vm_area_struct *mpnt)
+struct page *follow_huge_addr(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long address, int write)
+{
+ return NULL;
+}
+
+struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
+{
+ return NULL;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+{
+ return NULL;
+}
+
+static void free_huge_page(struct page *page)
+{
+ BUG_ON(page_count(page));
+ BUG_ON(page->mapping);
+
+ INIT_LIST_HEAD(&page->list);
+
+ spin_lock(&htlbpage_lock);
+ enqueue_huge_page(page);
+ htlbpagemem++;
+ spin_unlock(&htlbpage_lock);
+}
+
+void huge_page_release(struct page *page)
+{
+ if (!put_page_testzero(page))
+ return;
+
+ free_huge_page(page);
+}
+
+void unmap_hugepage_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
- struct mm_struct *mm = mpnt->vm_mm;
- unsigned long len, addr, end;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address;
+ pte_t *pte;
struct page *page;
- pte_t *ptep;
+ int i;
- addr = mpnt->vm_start;
- end = mpnt->vm_end;
- len = end - addr;
- do {
- unsigned long i;
+ BUG_ON(start & (HPAGE_SIZE - 1));
+ BUG_ON(end & (HPAGE_SIZE - 1));
- ptep = huge_pte_offset_map(mm, addr);
- page = pte_page(*ptep);
+ for (address = start; address < end; address += HPAGE_SIZE) {
+ pte = huge_pte_offset(mm, address);
+ BUG_ON(!pte);
+ if (pte_none(*pte))
+ continue;
+ page = pte_page(*pte);
+ huge_page_release(page);
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- pte_clear(ptep);
- ptep++;
+ pte_clear(pte);
+ pte++;
}
- pte_unmap(ptep - (1 << HUGETLB_PAGE_ORDER));
- free_hugetlb_page(page);
- addr += HPAGE_SIZE;
- } while (addr < end);
- mm->rss -= (len >> PAGE_SHIFT);
- mpnt->vm_ops = NULL;
+ }
+ mm->rss -= (end - start) >> PAGE_SHIFT;
+ flush_tlb_range(vma, start, end);
}
-static void unlink_vma(struct vm_area_struct *mpnt)
+void zap_hugepage_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long length)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ spin_lock(&mm->page_table_lock);
+ unmap_hugepage_range(vma, start, start + length);
+ spin_unlock(&mm->page_table_lock);
+}
+
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ unsigned long addr;
+ int ret = 0;
+
+ BUG_ON(vma->vm_start & ~HPAGE_MASK);
+ BUG_ON(vma->vm_end & ~HPAGE_MASK);
- vma = mm->mmap;
- if (vma == mpnt) {
- mm->mmap = vma->vm_next;
- } else {
- while (vma->vm_next != mpnt) {
- vma = vma->vm_next;
+ spin_lock(&mm->page_table_lock);
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+ unsigned long idx;
+ pte_t *pte = huge_pte_alloc(mm, addr);
+ struct page *page;
+
+ if (!pte) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!pte_none(*pte))
+ continue;
+
+ idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+ + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+ page = find_get_page(mapping, idx);
+ if (!page) {
+ /* charge the fs quota first */
+ if (hugetlb_get_quota(mapping)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ page = alloc_hugetlb_page();
+ if (!page) {
+ hugetlb_put_quota(mapping);
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+ unlock_page(page);
+ if (ret) {
+ hugetlb_put_quota(mapping);
+ free_huge_page(page);
+ goto out;
+ }
}
- vma->vm_next = mpnt->vm_next;
+ set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
}
- rb_erase(&mpnt->vm_rb, &mm->mm_rb);
- mm->mmap_cache = NULL;
- mm->map_count--;
+out:
+ spin_unlock(&mm->page_table_lock);
+ return ret;
}
-int free_hugepages(struct vm_area_struct *mpnt)
+static void update_and_free_page(struct page *page)
{
- unlink_vma(mpnt);
-
- flush_cache_range(mpnt, mpnt->vm_start, mpnt->vm_end);
- zap_hugetlb_resources(mpnt);
- flush_tlb_range(mpnt, mpnt->vm_start, mpnt->vm_end);
+ int j;
+ struct page *map;
- kmem_cache_free(vm_area_cachep, mpnt);
- return 1;
+ map = page;
+ htlbzone_pages--;
+ for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
+ map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
+ 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
+ 1 << PG_private | 1<< PG_writeback);
+ set_page_count(map, 0);
+ map++;
+ }
+ set_page_count(page, 1);
+ __free_pages(page, HUGETLB_PAGE_ORDER);
}
-extern long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-
-int set_hugetlb_mem_size(int count)
+static int try_to_free_low(int count)
{
- int j, lcount;
+ struct list_head *p;
struct page *page, *map;
+ map = NULL;
+ spin_lock(&htlbpage_lock);
+ /* all lowmem is on node 0 */
+ list_for_each(p, &hugepage_freelists[0]) {
+ if (map) {
+ list_del(&map->list);
+ update_and_free_page(map);
+ htlbpagemem--;
+ map = NULL;
+ if (++count == 0)
+ break;
+ }
+ page = list_entry(p, struct page, list);
+ if (!PageHighMem(page))
+ map = page;
+ }
+ if (map) {
+ list_del(&map->list);
+ update_and_free_page(map);
+ htlbpagemem--;
+ count++;
+ }
+ spin_unlock(&htlbpage_lock);
+ return count;
+}
+
+static int set_hugetlb_mem_size(int count)
+{
+ int lcount;
+ struct page *page;
+
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
+ if (lcount == 0)
+ return (int)htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
- page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
+ page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
- list_add(&page->list, &htlbpage_freelist);
+ enqueue_huge_page(page);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
-
/* Shrink the memory size. */
+ lcount = try_to_free_low(lcount);
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
-
spin_lock(&htlbpage_lock);
- htlbzone_pages--;
+ update_and_free_page(page);
spin_unlock(&htlbpage_lock);
-
- map = page;
- for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
- map->flags &= ~(1UL << PG_locked | 1UL << PG_error |
- 1UL << PG_referenced |
- 1UL << PG_dirty | 1UL << PG_active |
- 1UL << PG_private | 1UL << PG_writeback);
- set_page_count(page, 0);
- map++;
- }
- set_page_count(page, 1);
- __free_pages(page, HUGETLB_PAGE_ORDER);
}
return (int) htlbzone_pages;
}
-static struct page *
-hugetlb_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+ struct file *file, void *buffer, size_t *length)
+{
+ proc_dointvec(table, write, file, buffer, length);
+ htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
+ return 0;
+}
+
+static int __init hugetlb_setup(char *s)
+{
+ if (sscanf(s, "%d", &htlbpage_max) <= 0)
+ htlbpage_max = 0;
+ return 1;
+}
+__setup("hugepages=", hugetlb_setup);
+
+static int __init hugetlb_init(void)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < MAX_NUMNODES; ++i)
+ INIT_LIST_HEAD(&hugepage_freelists[i]);
+
+ for (i = 0; i < htlbpage_max; ++i) {
+ page = alloc_fresh_huge_page();
+ if (!page)
+ break;
+ spin_lock(&htlbpage_lock);
+ enqueue_huge_page(page);
+ spin_unlock(&htlbpage_lock);
+ }
+ htlbpage_max = htlbpagemem = htlbzone_pages = i;
+ printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
+ return 0;
+}
+module_init(hugetlb_init);
+
+int hugetlb_report_meminfo(char *buf)
+{
+ return sprintf(buf,
+ "HugePages_Total: %5lu\n"
+ "HugePages_Free: %5lu\n"
+ "Hugepagesize: %5lu kB\n",
+ htlbzone_pages,
+ htlbpagemem,
+ HPAGE_SIZE/1024);
+}
+
+int is_hugepage_mem_enough(size_t size)
+{
+ return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
+}
+
+/*
+ * We cannot handle pagefaults against hugetlb pages at all. They cause
+ * handle_mm_fault() to try to instantiate regular-sized pages in the
+ * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
+ * this far.
+ */
+static struct page *hugetlb_nopage(struct vm_area_struct *vma,
+ unsigned long address, int unused)
{
BUG();
return NULL;
}
-static struct vm_operations_struct hugetlb_vm_ops = {
+struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
- .close = zap_hugetlb_resources,
};
diff -Nru a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
--- a/arch/sparc64/mm/init.c Sat Oct 25 11:45:09 2003
+++ b/arch/sparc64/mm/init.c Sat Oct 25 11:45:09 2003
@@ -1166,7 +1166,11 @@
pte_t *pte;
#if (L1DCACHE_SIZE > PAGE_SIZE) /* is there D$ aliasing problem */
+ set_page_count(page, 1);
+ ClearPageCompound(page);
+
set_page_count((page + 1), 1);
+ ClearPageCompound(page + 1);
#endif
paddr = (unsigned long) page_address(page);
memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
@@ -1680,13 +1684,6 @@
}
}
-#ifdef CONFIG_HUGETLB_PAGE
-long htlbpagemem = 0;
-int htlbpage_max;
-long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-#endif
-
void __init mem_init(void)
{
unsigned long codepages, datapages, initpages;
@@ -1763,32 +1760,6 @@
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_ecache_flush_init();
-#ifdef CONFIG_HUGETLB_PAGE
- {
- long i, j;
- struct page *page, *map;
-
- /* For now reserve quarter for hugetlb_pages. */
- htlbzone_pages = (num_physpages >> ((HPAGE_SHIFT - PAGE_SHIFT) + 2)) ;
-
- /* Will make this kernel command line. */
- INIT_LIST_HEAD(&htlbpage_freelist);
- for (i = 0; i < htlbzone_pages; i++) {
- page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
- if (page == NULL)
- break;
- map = page;
- for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
- SetPageReserved(map);
- map++;
- }
- list_add(&page->list, &htlbpage_freelist);
- }
- printk("Total Huge_TLB_Page memory pages allocated %ld\n", i);
- htlbzone_pages = htlbpagemem = i;
- htlbpage_max = i;
- }
-#endif
}
void free_initmem (void)
diff -Nru a/arch/v850/kernel/as85ep1.c b/arch/v850/kernel/as85ep1.c
--- a/arch/v850/kernel/as85ep1.c Sat Oct 25 11:45:09 2003
+++ b/arch/v850/kernel/as85ep1.c Sat Oct 25 11:45:09 2003
@@ -114,22 +114,10 @@
void __init mach_reserve_bootmem ()
{
- extern char _root_fs_image_start, _root_fs_image_end;
- u32 root_fs_image_start = (u32)&_root_fs_image_start;
- u32 root_fs_image_end = (u32)&_root_fs_image_end;
-
if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START)
/* We can't use the space between SRAM and SDRAM, so
prevent the kernel from trying. */
reserve_bootmem (SRAM_END, SDRAM_ADDR - SRAM_END);
-
- /* Reserve the memory used by the root filesystem image if it's
- in RAM. */
- if (root_fs_image_end > root_fs_image_start
- && root_fs_image_start >= RAM_START
- && root_fs_image_start < RAM_END)
- reserve_bootmem (root_fs_image_start,
- root_fs_image_end - root_fs_image_start);
}
void mach_gettimeofday (struct timespec *tv)
diff -Nru a/arch/v850/kernel/rte_me2_cb.c b/arch/v850/kernel/rte_me2_cb.c
--- a/arch/v850/kernel/rte_me2_cb.c Sat Oct 25 11:45:09 2003
+++ b/arch/v850/kernel/rte_me2_cb.c Sat Oct 25 11:45:09 2003
@@ -53,19 +53,6 @@
*ram_len = RAM_END - RAM_START;
}
-void __init mach_reserve_bootmem ()
-{
- extern char _root_fs_image_start, _root_fs_image_end;
- u32 root_fs_image_start = (u32)&_root_fs_image_start;
- u32 root_fs_image_end = (u32)&_root_fs_image_end;
-
- /* Reserve the memory used by the root filesystem image if it's
- in RAM. */
- if (root_fs_image_start >= RAM_START && root_fs_image_start < RAM_END)
- reserve_bootmem (root_fs_image_start,
- root_fs_image_end - root_fs_image_start);
-}
-
void mach_gettimeofday (struct timespec *tv)
{
tv->tv_sec = 0;
@@ -230,8 +217,10 @@
CB_PIC_INT1M &= ~(1 << (irq - CB_PIC_BASE_IRQ));
}
-static void cb_pic_handle_irq (int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t cb_pic_handle_irq (int irq, void *dev_id,
+ struct pt_regs *regs)
{
+ irqreturn_t rval = IRQ_NONE;
unsigned status = CB_PIC_INTR;
unsigned enable = CB_PIC_INT1M;
@@ -257,13 +246,16 @@
/* Recursively call handle_irq to handle it. */
handle_irq (irq, regs);
+ rval = IRQ_HANDLED;
} while (status);
}
CB_PIC_INTEN |= CB_PIC_INT1EN;
-}
+ return rval;
+}
+
static void irq_nop (unsigned irq) { }
static unsigned cb_pic_startup_irq (unsigned irq)
diff -Nru a/arch/v850/kernel/rte_nb85e_cb.c b/arch/v850/kernel/rte_nb85e_cb.c
--- a/arch/v850/kernel/rte_nb85e_cb.c Sat Oct 25 11:45:09 2003
+++ b/arch/v850/kernel/rte_nb85e_cb.c Sat Oct 25 11:45:09 2003
@@ -54,21 +54,6 @@
*ram_len = SDRAM_SIZE;
}
-void __init mach_reserve_bootmem ()
-{
- extern char _root_fs_image_start, _root_fs_image_end;
- u32 root_fs_image_start = (u32)&_root_fs_image_start;
- u32 root_fs_image_end = (u32)&_root_fs_image_end;
-
- /* Reserve the memory used by the root filesystem image if it's
- in SDRAM. */
- if (root_fs_image_end > root_fs_image_start
- && root_fs_image_start >= SDRAM_ADDR
- && root_fs_image_start < (SDRAM_ADDR + SDRAM_SIZE))
- reserve_bootmem (root_fs_image_start,
- root_fs_image_end - root_fs_image_start);
-}
-
void mach_gettimeofday (struct timespec *tv)
{
tv->tv_sec = 0;
diff -Nru a/arch/v850/kernel/sim85e2.c b/arch/v850/kernel/sim85e2.c
--- a/arch/v850/kernel/sim85e2.c Sat Oct 25 11:45:09 2003
+++ b/arch/v850/kernel/sim85e2.c Sat Oct 25 11:45:09 2003
@@ -150,21 +150,6 @@
*ram_len = RAM_END - RAM_START;
}
-void __init mach_reserve_bootmem ()
-{
- extern char _root_fs_image_start, _root_fs_image_end;
- u32 root_fs_image_start = (u32)&_root_fs_image_start;
- u32 root_fs_image_end = (u32)&_root_fs_image_end;
-
- /* Reserve the memory used by the root filesystem image if it's
- in RAM. */
- if (root_fs_image_end > root_fs_image_start
- && root_fs_image_start >= RAM_START
- && root_fs_image_start < RAM_END)
- reserve_bootmem (root_fs_image_start,
- root_fs_image_end - root_fs_image_start);
-}
-
void __init mach_sched_init (struct irqaction *timer_action)
{
/* The simulator actually cycles through all interrupts
diff -Nru a/arch/v850/kernel/simcons.c b/arch/v850/kernel/simcons.c
--- a/arch/v850/kernel/simcons.c Sat Oct 25 11:45:09 2003
+++ b/arch/v850/kernel/simcons.c Sat Oct 25 11:45:09 2003
@@ -104,7 +104,14 @@
tty_driver = driver;
return 0;
}
-__initcall (simcons_tty_init);
+/* We use `late_initcall' instead of just `__initcall' as a workaround for
+ the fact that (1) simcons_tty_init can't be called before tty_init,
+ (2) tty_init is called via `module_init', (3) if statically linked,
+ module_init == device_init, and (4) there's no ordering of init lists.
+ We can do this easily because simcons is always statically linked, but
+ other tty drivers that depend on tty_init and which must use
+ `module_init' to declare their init routines are likely to be broken. */
+late_initcall(simcons_tty_init);
/* Poll for input on the console, and if there's any, deliver it to the
tty driver. */
diff -Nru a/drivers/acpi/battery.c b/drivers/acpi/battery.c
--- a/drivers/acpi/battery.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/battery.c Sat Oct 25 11:45:09 2003
@@ -360,7 +360,7 @@
ACPI_FUNCTION_TRACE("acpi_battery_read_info");
- if (!battery)
+ if (!battery || (off != 0))
goto end;
if (battery->flags.present)
@@ -459,7 +459,7 @@
ACPI_FUNCTION_TRACE("acpi_battery_read_state");
- if (!battery)
+ if (!battery || (off != 0))
goto end;
if (battery->flags.present)
@@ -543,7 +543,7 @@
ACPI_FUNCTION_TRACE("acpi_battery_read_alarm");
- if (!battery)
+ if (!battery || (off != 0))
goto end;
if (!battery->flags.present) {
diff -Nru a/drivers/acpi/bus.c b/drivers/acpi/bus.c
--- a/drivers/acpi/bus.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/bus.c Sat Oct 25 11:45:09 2003
@@ -39,7 +39,7 @@
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME ("acpi_bus")
-extern void eisa_set_level_irq(unsigned int irq);
+extern void acpi_pic_set_level_irq(unsigned int irq);
FADT_DESCRIPTOR acpi_fadt;
struct acpi_device *acpi_root;
@@ -615,7 +615,7 @@
if (acpi_ioapic)
mp_config_ioapic_for_sci(acpi_fadt.sci_int);
else
- eisa_set_level_irq(acpi_fadt.sci_int);
+ acpi_pic_set_level_irq(acpi_fadt.sci_int);
#endif
status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
diff -Nru a/drivers/acpi/ec.c b/drivers/acpi/ec.c
--- a/drivers/acpi/ec.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/ec.c Sat Oct 25 11:45:09 2003
@@ -94,6 +94,13 @@
/* External interfaces use first EC only, so remember */
static struct acpi_device *first_ec;
+/*
+ * We use a kernel thread to handle the EC's GPE query, so the query may
+ * be deferred. The query needs a context, which can be freed when we
+ * replace ec_ecdt with the EC device, so a deferred query may have a
+ * stale context. We use a flag to avoid this.
+ */
+static int ec_device_init = 0;
/* --------------------------------------------------------------------------
Transaction Management
-------------------------------------------------------------------------- */
@@ -393,8 +400,11 @@
acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR);
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_ec_gpe_query, ec);
+ if (!ec_device_init)
+ acpi_ec_gpe_query(ec); /* directly query when device didn't init */
+ else
+ status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
+ acpi_ec_gpe_query, ec);
}
/* --------------------------------------------------------------------------
@@ -589,6 +599,8 @@
we now have the *real* EC info, so kill the makeshift one.*/
acpi_evaluate_integer(ec->handle, "_UID", NULL, &uid);
if (ec_ecdt && ec_ecdt->uid == uid) {
+ acpi_disable_gpe(NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR);
+ ec_device_init = 1;
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler);
diff -Nru a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
--- a/drivers/acpi/events/evgpe.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/events/evgpe.c Sat Oct 25 11:45:09 2003
@@ -217,8 +217,8 @@
gpe_number = (i * ACPI_GPE_REGISTER_WIDTH) + j;
int_status |= acpi_ev_gpe_dispatch (
- &gpe_block->event_info[gpe_number],
- gpe_number + gpe_block->register_info[gpe_number].base_gpe_number);
+ &gpe_block->event_info[gpe_number],
+ j + gpe_register_info->base_gpe_number);
}
}
}
diff -Nru a/drivers/acpi/power.c b/drivers/acpi/power.c
--- a/drivers/acpi/power.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/power.c Sat Oct 25 11:45:09 2003
@@ -337,6 +337,9 @@
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
return_VALUE(-EINVAL);
+ if ((device->power.state < ACPI_STATE_D0) || (device->power.state > ACPI_STATE_D3))
+ return_VALUE(-ENODEV);
+
cl = &device->power.states[device->power.state].resources;
tl = &device->power.states[state].resources;
@@ -359,8 +362,6 @@
goto end;
}
- device->power.state = state;
-
/*
* Then we dereference all power resources used in the current list.
*/
@@ -370,6 +371,8 @@
goto end;
}
+ /* We shouldn't change the state till all above operations succeed */
+ device->power.state = state;
end:
if (result)
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
diff -Nru a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
--- a/drivers/acpi/utilities/utdelete.c Sat Oct 25 11:45:09 2003
+++ b/drivers/acpi/utilities/utdelete.c Sat Oct 25 11:45:09 2003
@@ -416,7 +416,7 @@
u32 i;
union acpi_generic_state *state_list = NULL;
union acpi_generic_state *state;
-
+ union acpi_operand_object *tmp;
ACPI_FUNCTION_TRACE_PTR ("ut_update_object_reference", object);
@@ -448,8 +448,16 @@
switch (ACPI_GET_OBJECT_TYPE (object)) {
case ACPI_TYPE_DEVICE:
- acpi_ut_update_ref_count (object->device.system_notify, action);
- acpi_ut_update_ref_count (object->device.device_notify, action);
+ tmp = object->device.system_notify;
+ if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->device.system_notify = NULL;
+ acpi_ut_update_ref_count (tmp, action);
+
+ tmp = object->device.device_notify;
+ if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->device.device_notify = NULL;
+ acpi_ut_update_ref_count (tmp, action);
+
break;
@@ -470,6 +478,10 @@
if (ACPI_FAILURE (status)) {
goto error_exit;
}
+
+ tmp = object->package.elements[i];
+ if (tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->package.elements[i] = NULL;
}
break;
@@ -481,6 +493,10 @@
if (ACPI_FAILURE (status)) {
goto error_exit;
}
+
+ tmp = object->buffer_field.buffer_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->buffer_field.buffer_obj = NULL;
break;
@@ -491,6 +507,10 @@
if (ACPI_FAILURE (status)) {
goto error_exit;
}
+
+ tmp = object->field.region_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->field.region_obj = NULL;
break;
@@ -502,11 +522,19 @@
goto error_exit;
}
+ tmp = object->bank_field.bank_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->bank_field.bank_obj = NULL;
+
status = acpi_ut_create_update_state_and_push (
object->bank_field.region_obj, action, &state_list);
if (ACPI_FAILURE (status)) {
goto error_exit;
}
+
+ tmp = object->bank_field.region_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->bank_field.region_obj = NULL;
break;
@@ -518,11 +546,19 @@
goto error_exit;
}
+ tmp = object->index_field.index_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->index_field.index_obj = NULL;
+
status = acpi_ut_create_update_state_and_push (
object->index_field.data_obj, action, &state_list);
if (ACPI_FAILURE (status)) {
goto error_exit;
}
+
+ tmp = object->index_field.data_obj;
+ if ( tmp && (tmp->common.reference_count <= 1) && action == REF_DECREMENT)
+ object->index_field.data_obj = NULL;
break;
diff -Nru a/drivers/base/cpu.c b/drivers/base/cpu.c
--- a/drivers/base/cpu.c Sat Oct 25 11:45:09 2003
+++ b/drivers/base/cpu.c Sat Oct 25 11:45:09 2003
@@ -23,10 +23,18 @@
*/
int __init register_cpu(struct cpu *cpu, int num, struct node *root)
{
+ int error;
+
cpu->node_id = cpu_to_node(num);
cpu->sysdev.id = num;
cpu->sysdev.cls = &cpu_sysdev_class;
- return sys_device_register(&cpu->sysdev);
+
+ error = sys_device_register(&cpu->sysdev);
+ if (!error && root)
+ error = sysfs_create_link(&root->sysdev.kobj,
+ &cpu->sysdev.kobj,
+ kobject_name(&cpu->sysdev.kobj));
+ return error;
}
diff -Nru a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
--- a/drivers/block/as-iosched.c Sat Oct 25 11:45:09 2003
+++ b/drivers/block/as-iosched.c Sat Oct 25 11:45:09 2003
@@ -1718,6 +1718,7 @@
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
if (arq) {
+ memset(arq, 0, sizeof(*arq));
RB_CLEAR(&arq->rb_node);
arq->request = rq;
arq->state = AS_RQ_NEW;
diff -Nru a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
--- a/drivers/block/deadline-iosched.c Sat Oct 25 11:45:09 2003
+++ b/drivers/block/deadline-iosched.c Sat Oct 25 11:45:09 2003
@@ -765,6 +765,7 @@
drq = mempool_alloc(dd->drq_pool, gfp_mask);
if (drq) {
+ memset(drq, 0, sizeof(*drq));
RB_CLEAR(&drq->rb_node);
drq->request = rq;
diff -Nru a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
--- a/drivers/block/scsi_ioctl.c Sat Oct 25 11:45:10 2003
+++ b/drivers/block/scsi_ioctl.c Sat Oct 25 11:45:10 2003
@@ -124,6 +124,8 @@
if (err)
return err;
+ if (size < 0)
+ return -EINVAL;
if (size > (q->max_sectors << 9))
return -EINVAL;
diff -Nru a/drivers/char/Kconfig b/drivers/char/Kconfig
--- a/drivers/char/Kconfig Sat Oct 25 11:45:09 2003
+++ b/drivers/char/Kconfig Sat Oct 25 11:45:09 2003
@@ -373,6 +373,22 @@
If you have an Alchemy AU1000 processor (MIPS based) and you want
to use serial ports, say Y. Otherwise, say N.
+config SGI_L1_SERIAL
+ bool "SGI Altix L1 serial support"
+ depends on SERIAL_NONSTANDARD && IA64
+ help
+ If you have an SGI Altix and you want to use the serial port
+ connected to the system controller (you want this!), say Y.
+ Otherwise, say N.
+
+config SGI_L1_SERIAL_CONSOLE
+ bool "SGI Altix L1 serial console support"
+ depends on SGI_L1_SERIAL
+ help
+ If you have an SGI Altix and you would like to use the system
+ controller serial port as your console (you want this!),
+ say Y. Otherwise, say N.
+
config AU1000_SERIAL_CONSOLE
bool "Enable Au1000 serial console"
depends on AU1000_UART
diff -Nru a/drivers/char/Makefile b/drivers/char/Makefile
--- a/drivers/char/Makefile Sat Oct 25 11:45:09 2003
+++ b/drivers/char/Makefile Sat Oct 25 11:45:09 2003
@@ -42,6 +42,7 @@
obj-$(CONFIG_SH_SCI) += sh-sci.o generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
+obj-$(CONFIG_SGI_L1_SERIAL) += sn_serial.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
diff -Nru a/drivers/char/drm/drm_drv.h b/drivers/char/drm/drm_drv.h
--- a/drivers/char/drm/drm_drv.h Sat Oct 25 11:45:09 2003
+++ b/drivers/char/drm/drm_drv.h Sat Oct 25 11:45:09 2003
@@ -638,7 +638,7 @@
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
DRM(stub_unregister)(DRM(minor)[i]);
DRM(takedown)( dev );
- return -ENOMEM;
+ return -EINVAL;
}
#endif
#if __REALLY_HAVE_MTRR
diff -Nru a/drivers/char/keyboard.c b/drivers/char/keyboard.c
--- a/drivers/char/keyboard.c Sat Oct 25 11:45:09 2003
+++ b/drivers/char/keyboard.c Sat Oct 25 11:45:09 2003
@@ -204,13 +204,13 @@
oldkey = INPUT_KEYCODE(dev, scancode);
INPUT_KEYCODE(dev, scancode) = keycode;
- for (i = 0; i < dev->keycodemax; i++)
- if(keycode == oldkey)
- break;
- if (i == dev->keycodemax)
- clear_bit(oldkey, dev->keybit);
+ clear_bit(oldkey, dev->keybit);
set_bit(keycode, dev->keybit);
-
+
+ for (i = 0; i < dev->keycodemax; i++)
+ if (INPUT_KEYCODE(dev,i) == oldkey)
+ set_bit(oldkey, dev->keybit);
+
return 0;
}
diff -Nru a/drivers/char/mem.c b/drivers/char/mem.c
--- a/drivers/char/mem.c Sat Oct 25 11:45:09 2003
+++ b/drivers/char/mem.c Sat Oct 25 11:45:09 2003
@@ -79,6 +79,22 @@
#endif
}
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
+{
+ unsigned long end_mem;
+
+ end_mem = __pa(high_memory);
+ if (addr >= end_mem)
+ return 0;
+
+ if (*count > end_mem - addr)
+ *count = end_mem - addr;
+
+ return 1;
+}
+#endif
+
static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp,
const char * buf, size_t count, loff_t *ppos)
{
@@ -113,14 +129,10 @@
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
- unsigned long end_mem;
ssize_t read;
- end_mem = __pa(high_memory);
- if (p >= end_mem)
- return 0;
- if (count > end_mem - p)
- count = end_mem - p;
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
read = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
/* we don't have page 0 mapped on sparc and m68k.. */
@@ -149,13 +161,9 @@
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
- unsigned long end_mem;
- end_mem = __pa(high_memory);
- if (p >= end_mem)
- return 0;
- if (count > end_mem - p)
- count = end_mem - p;
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
return do_write_mem(file, __va(p), p, buf, count, ppos);
}
diff -Nru a/drivers/char/sn_serial.c b/drivers/char/sn_serial.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/char/sn_serial.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,1000 @@
+/*
+ * C-Brick Serial Port (and console) driver for SGI Altix machines.
+ *
+ * This driver is NOT suitable for talking to the l1-controller for
+ * anything other than 'console activities' --- please use the l1
+ * driver for that.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include /* this is needed for get_console_nasid */
+#include
+#include
+
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+static char sysrq_serial_str[] = "\eSYS";
+static char *sysrq_serial_ptr = sysrq_serial_str;
+static unsigned long sysrq_requested;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+/* minor device number */
+#define SN_SAL_MINOR 64
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 128
+
+/* number of characters we can transmit to the SAL console at a time */
+#define SN_SAL_MAX_CHARS 120
+
+#define SN_SAL_EVENT_WRITE_WAKEUP 0
+
+/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to
+ * avoid losing chars, (always has to be a power of 2) */
+#define SN_SAL_BUFFER_SIZE (64 * (1 << 10))
+
+#define SN_SAL_UART_FIFO_DEPTH 16
+#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10
+
+/* we don't kmalloc/get_free_page these as we want them available
+ * before either of those is initialized */
+static char sn_xmit_buff_mem[SN_SAL_BUFFER_SIZE];
+
+struct volatile_circ_buf {
+ char *cb_buf;
+ int cb_head;
+ int cb_tail;
+};
+
+static struct volatile_circ_buf xmit = { .cb_buf = sn_xmit_buff_mem };
+static char sn_tmp_buffer[SN_SAL_BUFFER_SIZE];
+
+static struct tty_struct *sn_sal_tty;
+
+static struct timer_list sn_sal_timer;
+static int sn_sal_event; /* event type for task queue */
+
+static int sn_sal_is_asynch;
+static int sn_sal_irq;
+static spinlock_t sn_sal_lock = SPIN_LOCK_UNLOCKED;
+static int sn_total_tx_count;
+static int sn_total_rx_count;
+
+static void sn_sal_tasklet_action(unsigned long data);
+static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0);
+
+static unsigned long sn_interrupt_timeout;
+
+extern u64 master_node_bedrock_address;
+
+static int sn_debug_printf(const char *fmt, ...);
+
+#undef DEBUG
+#ifdef DEBUG
+#define DPRINTF(x...) sn_debug_printf(x)
+#else
+#define DPRINTF(x...) do { } while (0)
+#endif
+
+struct sn_sal_ops {
+ int (*sal_puts)(const char *s, int len);
+ int (*sal_getc)(void);
+ int (*sal_input_pending)(void);
+ void (*sal_wakeup_transmit)(void);
+};
+
+/* This is the pointer used. It is assigned to point to one of
+ * the tables below.
+ */
+static struct sn_sal_ops *sn_func;
+
+/* Prototypes */
+static void __init sn_sal_serial_console_init(void);
+static int snt_hw_puts(const char *, int);
+static int snt_poll_getc(void);
+static int snt_poll_input_pending(void);
+static int snt_sim_puts(const char *, int);
+static int snt_sim_getc(void);
+static int snt_sim_input_pending(void);
+static int snt_intr_getc(void);
+static int snt_intr_input_pending(void);
+static void sn_intr_transmit_chars(void);
+
+/* A table for polling */
+static struct sn_sal_ops poll_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_poll_getc,
+ .sal_input_pending = snt_poll_input_pending
+};
+
+/* A table for the simulator */
+static struct sn_sal_ops sim_ops = {
+ .sal_puts = snt_sim_puts,
+ .sal_getc = snt_sim_getc,
+ .sal_input_pending = snt_sim_input_pending
+};
+
+/* A table for interrupts enabled */
+static struct sn_sal_ops intr_ops = {
+ .sal_puts = snt_hw_puts,
+ .sal_getc = snt_intr_getc,
+ .sal_input_pending = snt_intr_input_pending,
+ .sal_wakeup_transmit = sn_intr_transmit_chars
+};
+
+
+/* the console does output in two distinctly different ways:
+ * synchronous and asynchronous (buffered). initially, early_printk
+ * does synchronous output. any data written goes directly to the SAL
+ * to be output (incidentally, it is internally buffered by the SAL)
+ * after interrupts and timers are initialized and available for use,
+ * the console init code switches to asynchronous output. this is
+ * also the earliest opportunity to begin polling for console input.
+ * after console initialization, console output and tty (serial port)
+ * output is buffered and sent to the SAL asynchronously (either by
+ * timer callback or by UART interrupt) */
+
+
+/* routines for running the console in polling mode */
+
+static int
+snt_hw_puts(const char *s, int len)
+{
+ /* looking at the PROM source code, putb calls the flush
+ * routine, so if we send characters in FIFO sized chunks, it
+ * should go out by the next time the timer gets called */
+ return ia64_sn_console_putb(s, len);
+}
+
+static int
+snt_poll_getc(void)
+{
+ int ch;
+ ia64_sn_console_getc(&ch);
+ return ch;
+}
+
+static int
+snt_poll_input_pending(void)
+{
+ int status, input;
+
+ status = ia64_sn_console_check(&input);
+ return !status && input;
+}
+
+
+/* routines for running the console on the simulator */
+
+static int
+snt_sim_puts(const char *str, int count)
+{
+ int counter = count;
+
+#ifdef FLAG_DIRECT_CONSOLE_WRITES
+ /* Prepend a marker to the output so it is easy to tell whether it
+ * went out via the SAL or was written directly to the UART */
+ writeb('[', master_node_bedrock_address + (UART_TX << 3));
+ writeb('+', master_node_bedrock_address + (UART_TX << 3));
+ writeb(']', master_node_bedrock_address + (UART_TX << 3));
+ writeb(' ', master_node_bedrock_address + (UART_TX << 3));
+#endif /* FLAG_DIRECT_CONSOLE_WRITES */
+ while (counter > 0) {
+ writeb(*str, master_node_bedrock_address + (UART_TX << 3));
+ counter--;
+ str++;
+ }
+
+ return count;
+}
+
+static int
+snt_sim_getc(void)
+{
+ return readb(master_node_bedrock_address + (UART_RX << 3));
+}
+
+static int
+snt_sim_input_pending(void)
+{
+ return readb(master_node_bedrock_address + (UART_LSR << 3)) & UART_LSR_DR;
+}
+
+
+/* routines for an interrupt driven console (normal) */
+
+static int
+snt_intr_getc(void)
+{
+ return ia64_sn_console_readc();
+}
+
+static int
+snt_intr_input_pending(void)
+{
+ return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
+}
+
+/* The early printk (possible setup) and function call */
+
+void
+early_printk_sn_sal(const char *s, unsigned count)
+{
+ extern void early_sn_setup(void);
+
+ if (!sn_func) {
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_func = &sim_ops;
+ else
+ sn_func = &poll_ops;
+
+ early_sn_setup();
+ }
+ sn_func->sal_puts(s, count);
+}
+
+/* this is as "close to the metal" as we can get, used when the driver
+ * itself may be broken */
+static int
+sn_debug_printf(const char *fmt, ...)
+{
+ static char printk_buf[1024];
+ int printed_len;
+ va_list args;
+
+ va_start(args, fmt);
+ printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
+ early_printk_sn_sal(printk_buf, printed_len);
+ va_end(args);
+ return printed_len;
+}
+
+/*
+ * Interrupt handling routines.
+ */
+
+static void
+sn_sal_sched_event(int event)
+{
+ sn_sal_event |= (1 << event);
+ tasklet_schedule(&sn_sal_tasklet);
+}
+
+/* sn_receive_chars can be called before sn_sal_tty is initialized. in
+ * that case, its only use is to trigger sysrq and kdb */
+static void
+sn_receive_chars(struct pt_regs *regs, unsigned long *flags)
+{
+ int ch;
+
+ while (sn_func->sal_input_pending()) {
+ ch = sn_func->sal_getc();
+ if (ch < 0) {
+ printk(KERN_ERR "sn_serial: An error occured while "
+ "obtaining data from the console (0x%0x)\n", ch);
+ break;
+ }
+#if defined(CONFIG_SGI_L1_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+ if (sysrq_requested) {
+ unsigned long sysrq_timeout = sysrq_requested + HZ*5;
+
+ sysrq_requested = 0;
+ if (ch && time_before(jiffies, sysrq_timeout)) {
+ spin_unlock_irqrestore(&sn_sal_lock, *flags);
+ handle_sysrq(ch, regs, NULL);
+ spin_lock_irqsave(&sn_sal_lock, *flags);
+ /* don't record this char */
+ continue;
+ }
+ }
+ if (ch == *sysrq_serial_ptr) {
+ if (!(*++sysrq_serial_ptr)) {
+ sysrq_requested = jiffies;
+ sysrq_serial_ptr = sysrq_serial_str;
+ }
+ }
+ else
+ sysrq_serial_ptr = sysrq_serial_str;
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE && CONFIG_MAGIC_SYSRQ */
+
+ /* record the character to pass up to the tty layer */
+ if (sn_sal_tty) {
+ *sn_sal_tty->flip.char_buf_ptr = ch;
+ sn_sal_tty->flip.char_buf_ptr++;
+ sn_sal_tty->flip.count++;
+ if (sn_sal_tty->flip.count == TTY_FLIPBUF_SIZE)
+ break;
+ }
+ sn_total_rx_count++;
+ }
+
+ if (sn_sal_tty)
+ tty_flip_buffer_push((struct tty_struct *)sn_sal_tty);
+}
+
+
+/* synch_flush_xmit must be called with sn_sal_lock */
+static void
+synch_flush_xmit(void)
+{
+ int xmit_count, tail, head, loops, ii;
+ int result;
+ char *start;
+
+ if (xmit.cb_head == xmit.cb_tail)
+ return; /* Nothing to do. */
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ /* twice around gets the tail to the end of the buffer and
+ * then to the head, if needed */
+ loops = (head < tail) ? 2 : 1;
+
+ for (ii = 0; ii < loops; ii++) {
+ xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count > 0) {
+ result = sn_func->sal_puts((char *)start, xmit_count);
+ if (!result)
+ sn_debug_printf("\n*** synch_flush_xmit failed to flush\n");
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = (char *)&xmit.cb_buf[tail];
+ }
+ }
+ }
+}
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+static void
+sn_poll_transmit_chars(void)
+{
+ int xmit_count, tail, head;
+ int result;
+ char *start;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ if (xmit.cb_head == xmit.cb_tail ||
+ (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+ /* Nothing to do. */
+ return;
+ }
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ xmit_count = (head < tail) ? (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count == 0)
+ sn_debug_printf("\n*** empty xmit_count\n");
+
+ /* use the ops, as we could be on the simulator */
+ result = sn_func->sal_puts((char *)start, xmit_count);
+ if (!result)
+ sn_debug_printf("\n*** error in synchronous sal_puts\n");
+ /* XXX chadt clean this up */
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = &xmit.cb_buf[tail];
+ }
+
+ /* if there are few enough characters left in the xmit buffer
+ * that we could stand for the upper layer to send us some
+ * more, ask for it. */
+ if (sn_sal_tty)
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+ sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+/* must be called with a lock protecting the circular buffer and
+ * sn_sal_tty */
+static void
+sn_intr_transmit_chars(void)
+{
+ int xmit_count, tail, head, loops, ii;
+ int result;
+ char *start;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ if (xmit.cb_head == xmit.cb_tail ||
+ (sn_sal_tty && (sn_sal_tty->stopped || sn_sal_tty->hw_stopped))) {
+ /* Nothing to do. */
+ return;
+ }
+
+ head = xmit.cb_head;
+ tail = xmit.cb_tail;
+ start = &xmit.cb_buf[tail];
+
+ /* twice around gets the tail to the end of the buffer and
+ * then to the head, if needed */
+ loops = (head < tail) ? 2 : 1;
+
+ for (ii = 0; ii < loops; ii++) {
+ xmit_count = (head < tail) ?
+ (SN_SAL_BUFFER_SIZE - tail) : (head - tail);
+
+ if (xmit_count > 0) {
+ result = ia64_sn_console_xmit_chars((char *)start, xmit_count);
+#ifdef DEBUG
+ if (!result)
+ sn_debug_printf("`");
+#endif
+ if (result > 0) {
+ xmit_count -= result;
+ sn_total_tx_count += result;
+ tail += result;
+ tail &= SN_SAL_BUFFER_SIZE - 1;
+ xmit.cb_tail = tail;
+ start = &xmit.cb_buf[tail];
+ }
+ }
+ }
+
+ /* if there are few enough characters left in the xmit buffer
+ * that we could stand for the upper layer to send us some
+ * more, ask for it. */
+ if (sn_sal_tty)
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) < WAKEUP_CHARS)
+ sn_sal_sched_event(SN_SAL_EVENT_WRITE_WAKEUP);
+}
+
+
+static irqreturn_t
+sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* this call is necessary to pass the interrupt back to the
+ * SAL, since it doesn't intercept the UART interrupts
+ * itself */
+ int status = ia64_sn_console_intr_status();
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (status & SAL_CONSOLE_INTR_RECV)
+ sn_receive_chars(regs, &flags);
+ if (status & SAL_CONSOLE_INTR_XMIT)
+ sn_intr_transmit_chars();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+/* returns the console irq if interrupt is successfully registered,
+ * else 0 */
+static int
+sn_sal_connect_interrupt(void)
+{
+ cpuid_t intr_cpuid;
+ unsigned int intr_cpuloc;
+ nasid_t console_nasid;
+ unsigned int console_irq;
+ int result;
+
+ console_nasid = ia64_sn_get_console_nasid();
+ intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu;
+ intr_cpuloc = cpu_physical_id(intr_cpuid);
+ console_irq = CPU_VECTOR_TO_IRQ(intr_cpuloc, SGI_UART_VECTOR);
+
+ result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR);
+ BUG_ON(result != SGI_UART_VECTOR);
+
+ result = request_irq(console_irq, sn_sal_interrupt, SA_INTERRUPT, "SAL console driver", &sn_sal_tty);
+ if (result >= 0)
+ return console_irq;
+
+ printk(KERN_INFO "sn_serial: console proceeding in polled mode\n");
+ return 0;
+}
+
+static void
+sn_sal_tasklet_action(unsigned long data)
+{
+ unsigned long flags;
+
+ if (sn_sal_tty) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (sn_sal_tty) {
+ if (test_and_clear_bit(SN_SAL_EVENT_WRITE_WAKEUP, &sn_sal_event)) {
+ if ((sn_sal_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && sn_sal_tty->ldisc.write_wakeup)
+ (sn_sal_tty->ldisc.write_wakeup)((struct tty_struct *)sn_sal_tty);
+ wake_up_interruptible((wait_queue_head_t *)&sn_sal_tty->write_wait);
+ }
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+
+/*
+ * This function handles polled mode.
+ */
+static void
+sn_sal_timer_poll(unsigned long dummy)
+{
+ unsigned long flags;
+
+ if (!sn_sal_irq) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ sn_receive_chars(NULL, &flags);
+ sn_poll_transmit_chars();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+ }
+}
+
+
+/*
+ * User-level console routines
+ */
+
+static int
+sn_sal_open(struct tty_struct *tty, struct file *filp)
+{
+ unsigned long flags;
+
+ DPRINTF("sn_sal_open: sn_sal_tty = %p, tty = %p, filp = %p\n",
+ sn_sal_tty, tty, filp);
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (!sn_sal_tty)
+ sn_sal_tty = tty;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ return 0;
+}
+
+
+/* We're keeping all our resources. We're keeping interrupts turned
+ * on. Maybe just let the tty layer finish its stuff...? GMSH
+ */
+static void
+sn_sal_close(struct tty_struct *tty, struct file * filp)
+{
+ if (tty->count == 1) {
+ unsigned long flags;
+ tty->closing = 1;
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ if (tty->ldisc.flush_buffer)
+ tty->ldisc.flush_buffer(tty);
+ tty->closing = 0;
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ sn_sal_tty = NULL;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+
+static int
+sn_sal_write(struct tty_struct *tty, int from_user,
+ const unsigned char *buf, int count)
+{
+ int c, ret = 0;
+ unsigned long flags;
+
+ if (from_user) {
+ while (1) {
+ int c1;
+ c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail,
+ SN_SAL_BUFFER_SIZE);
+
+ if (count < c)
+ c = count;
+ if (c <= 0)
+ break;
+
+ c -= copy_from_user(sn_tmp_buffer, buf, c);
+ if (!c) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Turn off interrupts and see if the xmit buffer has
+ * moved since the last time we looked.
+ */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ c1 = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+ if (c1 < c)
+ c = c1;
+
+ memcpy(xmit.cb_buf + xmit.cb_head, sn_tmp_buffer, c);
+ xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ }
+ else {
+ /* The buffer passed in isn't coming from userland,
+ * so cut out the middleman (sn_tmp_buffer).
+ */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ while (1) {
+ c = CIRC_SPACE_TO_END(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+
+ if (count < c)
+ c = count;
+ if (c <= 0) {
+ break;
+ }
+ memcpy(xmit.cb_buf + xmit.cb_head, buf, c);
+ xmit.cb_head = ((xmit.cb_head + c) & (SN_SAL_BUFFER_SIZE - 1));
+ buf += c;
+ count -= c;
+ ret += c;
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (xmit.cb_head != xmit.cb_tail && !(tty && (tty->stopped || tty->hw_stopped)))
+ if (sn_func->sal_wakeup_transmit)
+ sn_func->sal_wakeup_transmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ return ret;
+}
+
+
+static void
+sn_sal_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE) != 0) {
+ xmit.cb_buf[xmit.cb_head] = ch;
+ xmit.cb_head = (xmit.cb_head + 1) & (SN_SAL_BUFFER_SIZE-1);
+ if ( sn_func->sal_wakeup_transmit )
+ sn_func->sal_wakeup_transmit();
+ }
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+static void
+sn_sal_flush_chars(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ if (CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE))
+ if (sn_func->sal_wakeup_transmit)
+ sn_func->sal_wakeup_transmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+
+static int
+sn_sal_write_room(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int space;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ space = CIRC_SPACE(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return space;
+}
+
+
+static int
+sn_sal_chars_in_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+ int space;
+
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ space = CIRC_CNT(xmit.cb_head, xmit.cb_tail, SN_SAL_BUFFER_SIZE);
+ DPRINTF("<%d>", space);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ return space;
+}
+
+
+static void
+sn_sal_flush_buffer(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ /* drop everything */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ xmit.cb_head = xmit.cb_tail = 0;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+
+ /* wake up tty level */
+ wake_up_interruptible(&tty->write_wait);
+ if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup)
+ (tty->ldisc.write_wakeup)(tty);
+}
+
+
+static void
+sn_sal_hangup(struct tty_struct *tty)
+{
+ sn_sal_flush_buffer(tty);
+}
+
+
+static void
+sn_sal_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* this is SAL's problem */
+ DPRINTF("");
+}
+
+
+/*
+ * sn_sal_read_proc
+ *
+ * Console /proc interface
+ */
+
+static int
+sn_sal_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+ off_t begin = 0;
+
+ len += sprintf(page, "sn_serial: nasid:%d irq:%d tx:%d rx:%d\n",
+ get_console_nasid(), sn_sal_irq,
+ sn_total_tx_count, sn_total_rx_count);
+ *eof = 1;
+
+ if (off >= len+begin)
+ return 0;
+ *start = page + (off-begin);
+
+ return count < begin+len-off ? count : begin+len-off;
+}
+
+
+static struct tty_operations sn_sal_driver_ops = {
+ .open = sn_sal_open,
+ .close = sn_sal_close,
+ .write = sn_sal_write,
+ .put_char = sn_sal_put_char,
+ .flush_chars = sn_sal_flush_chars,
+ .write_room = sn_sal_write_room,
+ .chars_in_buffer = sn_sal_chars_in_buffer,
+ .hangup = sn_sal_hangup,
+ .wait_until_sent = sn_sal_wait_until_sent,
+ .read_proc = sn_sal_read_proc,
+};
+static struct tty_driver *sn_sal_driver;
+
+/* sn_sal_init wishlist:
+ * - allocate sn_tmp_buffer
+ * - fix up the tty_driver struct
+ * - turn on receive interrupts
+ * - do any termios twiddling once and for all
+ */
+
+/*
+ * Boot-time initialization code
+ */
+
+static void __init
+sn_sal_switch_to_asynch(void)
+{
+ unsigned long flags;
+
+ sn_debug_printf("sn_serial: about to switch to asynchronous console\n");
+
+ /* without early_printk, we may be invoked late enough to race
+ * with other cpus doing console IO at this point, however
+ * console interrupts will never be enabled */
+ spin_lock_irqsave(&sn_sal_lock, flags);
+
+ /* early_printk invocation may have done this for us */
+ if (!sn_func) {
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_func = &sim_ops;
+ else
+ sn_func = &poll_ops;
+ }
+
+ /* we can't turn on the console interrupt (as request_irq
+ * calls kmalloc, which isn't set up yet), so we rely on a
+ * timer to poll for input and push data from the console
+ * buffer.
+ */
+ init_timer(&sn_sal_timer);
+ sn_sal_timer.function = sn_sal_timer_poll;
+
+ if (IS_RUNNING_ON_SIMULATOR())
+ sn_interrupt_timeout = 6;
+ else {
+ /* 960cps / 16 char FIFO = 60HZ
+ * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
+ sn_interrupt_timeout = HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
+ }
+ mod_timer(&sn_sal_timer, jiffies + sn_interrupt_timeout);
+
+ sn_sal_is_asynch = 1;
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+}
+
+static void __init
+sn_sal_switch_to_interrupts(void)
+{
+ int irq;
+
+ sn_debug_printf("sn_serial: switching to interrupt driven console\n");
+
+ irq = sn_sal_connect_interrupt();
+ if (irq) {
+ unsigned long flags;
+ spin_lock_irqsave(&sn_sal_lock, flags);
+
+ /* sn_sal_irq is a global variable. When it's set to
+ * a non-zero value, we stop polling for input (since
+ * interrupts should now be enabled). */
+ sn_sal_irq = irq;
+ sn_func = &intr_ops;
+
+ /* turn on receive interrupts */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ }
+}
+
+static int __init
+sn_sal_module_init(void)
+{
+ int retval;
+
+ printk("sn_serial: sn_sal_module_init\n");
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ sn_sal_driver = alloc_tty_driver(1);
+ if ( !sn_sal_driver )
+ return -ENOMEM;
+
+ sn_sal_driver->owner = THIS_MODULE;
+ sn_sal_driver->driver_name = "sn_serial";
+ sn_sal_driver->name = "ttyS";
+ sn_sal_driver->major = TTY_MAJOR;
+ sn_sal_driver->minor_start = SN_SAL_MINOR;
+ sn_sal_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ sn_sal_driver->subtype = SERIAL_TYPE_NORMAL;
+ sn_sal_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
+
+ tty_set_operations(sn_sal_driver, &sn_sal_driver_ops);
+
+ /* when this driver is compiled in, the console initialization
+ * will have already switched us into asynchronous operation
+ * before we get here through the module initcalls */
+ if (!sn_sal_is_asynch)
+ sn_sal_switch_to_asynch();
+
+ /* at this point (module_init) we can try to turn on interrupts */
+ if (!IS_RUNNING_ON_SIMULATOR())
+ sn_sal_switch_to_interrupts();
+
+ sn_sal_driver->init_termios = tty_std_termios;
+ sn_sal_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+
+ if ((retval = tty_register_driver(sn_sal_driver))) {
+ printk(KERN_ERR "sn_serial: Unable to register tty driver\n");
+ return retval;
+ }
+#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
+ sn_sal_serial_console_init();
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */
+ return 0;
+}
+
+
+static void __exit
+sn_sal_module_exit(void)
+{
+ del_timer_sync(&sn_sal_timer);
+ tty_unregister_driver(sn_sal_driver);
+ put_tty_driver(sn_sal_driver);
+}
+
+module_init(sn_sal_module_init);
+module_exit(sn_sal_module_exit);
+
+/*
+ * Kernel console definitions
+ */
+
+#ifdef CONFIG_SGI_L1_SERIAL_CONSOLE
+/*
+ * Print a string to the SAL console. The console_lock must be held
+ * when we get here.
+ */
+static void
+sn_sal_console_write(struct console *co, const char *s, unsigned count)
+{
+ unsigned long flags;
+
+ BUG_ON(!sn_sal_is_asynch);
+
+ /* somebody really wants this output, might be an
+ * oops, kdb, panic, etc. make sure they get it. */
+ if (spin_is_locked(&sn_sal_lock)) {
+ synch_flush_xmit();
+ sn_func->sal_puts(s, count);
+ }
+ else if (in_interrupt()) {
+ spin_lock_irqsave(&sn_sal_lock, flags);
+ synch_flush_xmit();
+ spin_unlock_irqrestore(&sn_sal_lock, flags);
+ sn_func->sal_puts(s, count);
+ }
+ else
+ sn_sal_write(NULL, 0, s, count);
+}
+
+static struct tty_driver *
+sn_sal_console_device(struct console *c, int *index)
+{
+ *index = c->index;
+ return sn_sal_driver;
+}
+
+static int __init
+sn_sal_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+
+static struct console sal_console = {
+ .name = "ttyS",
+ .write = sn_sal_console_write,
+ .device = sn_sal_console_device,
+ .setup = sn_sal_console_setup,
+ .index = -1
+};
+
+static void __init
+sn_sal_serial_console_init(void)
+{
+ if (ia64_platform_is("sn2")) {
+ sn_sal_switch_to_asynch();
+ sn_debug_printf("sn_sal_serial_console_init : register console\n");
+ register_console(&sal_console);
+ }
+}
+
+#endif /* CONFIG_SGI_L1_SERIAL_CONSOLE */
diff -Nru a/drivers/char/toshiba.c b/drivers/char/toshiba.c
--- a/drivers/char/toshiba.c Sat Oct 25 11:45:09 2003
+++ b/drivers/char/toshiba.c Sat Oct 25 11:45:09 2003
@@ -292,7 +292,7 @@
* Print the information for /proc/toshiba
*/
#ifdef CONFIG_PROC_FS
-int tosh_get_info(char *buffer, char **start, off_t fpos, int length)
+static int tosh_get_info(char *buffer, char **start, off_t fpos, int length)
{
char *temp;
int key;
diff -Nru a/drivers/char/tty_io.c b/drivers/char/tty_io.c
--- a/drivers/char/tty_io.c Sat Oct 25 11:45:09 2003
+++ b/drivers/char/tty_io.c Sat Oct 25 11:45:09 2003
@@ -423,8 +423,6 @@
redirect = NULL;
}
spin_unlock(&redirect_lock);
- if (f)
- fput(f);
check_tty_count(tty, "do_tty_hangup");
file_list_lock();
@@ -512,6 +510,8 @@
} else if (tty->driver->hangup)
(tty->driver->hangup)(tty);
unlock_kernel();
+ if (f)
+ fput(f);
}
void tty_hangup(struct tty_struct * tty)
diff -Nru a/drivers/ide/Makefile b/drivers/ide/Makefile
--- a/drivers/ide/Makefile Sat Oct 25 11:45:09 2003
+++ b/drivers/ide/Makefile Sat Oct 25 11:45:09 2003
@@ -8,7 +8,7 @@
# In the future, some of these should be built conditionally.
#
# First come modules that register themselves with the core
-obj-$(CONFIG_BLK_DEV_IDEPCI) += pci/
+obj-$(CONFIG_BLK_DEV_IDE) += pci/
# Core IDE code - must come before legacy
diff -Nru a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
--- a/drivers/ide/pci/piix.c Sat Oct 25 11:45:09 2003
+++ b/drivers/ide/pci/piix.c Sat Oct 25 11:45:09 2003
@@ -797,7 +797,9 @@
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_11,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15},
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16},
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_10,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17},
+#ifndef CONFIG_SCSI_SATA
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18},
+#endif /* !CONFIG_SCSI_SATA */
{ 0, },
};
diff -Nru a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
--- a/drivers/ide/pci/siimage.c Sat Oct 25 11:45:09 2003
+++ b/drivers/ide/pci/siimage.c Sat Oct 25 11:45:09 2003
@@ -35,13 +35,13 @@
#include "siimage.h"
#if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS)
-#include
#include
static u8 siimage_proc = 0;
#define SIIMAGE_MAX_DEVS 16
static struct pci_dev *siimage_devs[SIIMAGE_MAX_DEVS];
static int n_siimage_devs;
+#endif /* defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS) */
/**
* pdev_is_sata - check if device is SATA
@@ -120,7 +120,8 @@
base |= drive->select.b.unit << drive->select.b.unit;
return base;
}
-
+
+#if defined(DISPLAY_SIIMAGE_TIMINGS) && defined(CONFIG_PROC_FS)
/**
* print_siimage_get_info - print minimal proc information
* @buf: buffer to write into (kernel space)
diff -Nru a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
--- a/drivers/input/gameport/fm801-gp.c Sat Oct 25 11:45:09 2003
+++ b/drivers/input/gameport/fm801-gp.c Sat Oct 25 11:45:09 2003
@@ -140,7 +140,7 @@
.name = "FM801 GP",
.id_table = fm801_gp_id_table,
.probe = fm801_gp_probe,
- .remove = fm801_gp_remove,
+ .remove = __devexit_p(fm801_gp_remove),
};
int __init fm801_gp_init(void)
diff -Nru a/drivers/input/gameport/vortex.c b/drivers/input/gameport/vortex.c
--- a/drivers/input/gameport/vortex.c Sat Oct 25 11:45:09 2003
+++ b/drivers/input/gameport/vortex.c Sat Oct 25 11:45:09 2003
@@ -168,7 +168,7 @@
.name = "vortex",
.id_table = vortex_id_table,
.probe = vortex_probe,
- .remove = vortex_remove,
+ .remove = __devexit_p(vortex_remove),
};
int __init vortex_init(void)
diff -Nru a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
--- a/drivers/input/keyboard/atkbd.c Sat Oct 25 11:45:09 2003
+++ b/drivers/input/keyboard/atkbd.c Sat Oct 25 11:45:09 2003
@@ -111,6 +111,7 @@
#define ATKBD_CMD_SETREP 0x10f3
#define ATKBD_CMD_ENABLE 0x00f4
#define ATKBD_CMD_RESET_DIS 0x00f5
+#define ATKBD_CMD_SETALL_MBR 0x00fa
#define ATKBD_CMD_RESET_BAT 0x02ff
#define ATKBD_CMD_RESEND 0x00fe
#define ATKBD_CMD_EX_ENABLE 0x10ea
@@ -519,6 +520,8 @@
if (atkbd_command(atkbd, param, ATKBD_CMD_SSCANSET))
return 2;
}
+
+ atkbd_command(atkbd, param, ATKBD_CMD_SETALL_MBR);
return 3;
}
diff -Nru a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
--- a/drivers/isdn/capi/kcapi.c Sat Oct 25 11:45:09 2003
+++ b/drivers/isdn/capi/kcapi.c Sat Oct 25 11:45:09 2003
@@ -79,7 +79,6 @@
{
if (!try_module_get(card->owner))
return NULL;
- DBG("Reserve module: %s", card->owner->name);
return card;
}
@@ -87,7 +86,6 @@
capi_ctr_put(struct capi_ctr *card)
{
module_put(card->owner);
- DBG("Release module: %s", card->owner->name);
}
/* ------------------------------------------------------------- */
diff -Nru a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
--- a/drivers/mca/mca-driver.c Sat Oct 25 11:45:09 2003
+++ b/drivers/mca/mca-driver.c Sat Oct 25 11:45:09 2003
@@ -32,9 +32,11 @@
{
int r;
- mca_drv->driver.bus = &mca_bus_type;
- if ((r = driver_register(&mca_drv->driver)) < 0)
- return r;
+ if (MCA_bus) {
+ mca_drv->driver.bus = &mca_bus_type;
+ if ((r = driver_register(&mca_drv->driver)) < 0)
+ return r;
+ }
return 0;
}
@@ -42,6 +44,7 @@
void mca_unregister_driver(struct mca_driver *mca_drv)
{
- driver_unregister(&mca_drv->driver);
+ if (MCA_bus)
+ driver_unregister(&mca_drv->driver);
}
EXPORT_SYMBOL(mca_unregister_driver);
diff -Nru a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c
--- a/drivers/mca/mca-legacy.c Sat Oct 25 11:45:10 2003
+++ b/drivers/mca/mca-legacy.c Sat Oct 25 11:45:10 2003
@@ -123,7 +123,7 @@
{
struct mca_find_adapter_info info = { 0 };
- if(id == 0xffff)
+ if (!MCA_bus || id == 0xffff)
return MCA_NOTFOUND;
info.slot = start;
diff -Nru a/drivers/md/raid0.c b/drivers/md/raid0.c
--- a/drivers/md/raid0.c Sat Oct 25 11:45:09 2003
+++ b/drivers/md/raid0.c Sat Oct 25 11:45:09 2003
@@ -332,7 +332,7 @@
static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
- unsigned int sect_in_chunk, chunksize_bits, chunk_size;
+ unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
raid0_conf_t *conf = mddev_to_conf(mddev);
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
@@ -340,11 +340,12 @@
sector_t block, rsect;
chunk_size = mddev->chunk_size >> 10;
+ chunk_sects = mddev->chunk_size >> 9;
chunksize_bits = ffz(~chunk_size);
block = bio->bi_sector >> 1;
- if (unlikely(chunk_size < (block & (chunk_size - 1)) + (bio->bi_size >> 10))) {
+ if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
if (bio->bi_vcnt != 1 ||
@@ -353,7 +354,7 @@
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, bio_split_pool, (chunk_size - (block & (chunk_size - 1)))<<1 );
+ bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
if (raid0_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (raid0_make_request(q, &bp->bio2))
diff -Nru a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
--- a/drivers/media/common/saa7146_i2c.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/common/saa7146_i2c.c Sat Oct 25 11:45:09 2003
@@ -409,11 +409,8 @@
if( NULL != i2c_adapter ) {
memset(i2c_adapter,0,sizeof(struct i2c_adapter));
strcpy(i2c_adapter->name, dev->name);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- i2c_adapter->data = dev;
-#else
i2c_set_adapdata(i2c_adapter,dev);
-#endif
+ i2c_adapter->class = I2C_ADAP_CLASS_TV_ANALOG;
i2c_adapter->algo = &saa7146_algo;
i2c_adapter->algo_data = NULL;
i2c_adapter->id = I2C_ALGO_SAA7146;
diff -Nru a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
--- a/drivers/media/dvb/dvb-core/dvb_demux.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c Sat Oct 25 11:45:09 2003
@@ -105,7 +105,7 @@
{
u32 crc;
- crc = crc32_le(~0, data, length);
+ crc = crc32_be(~0, data, length);
data[length] = (crc >> 24) & 0xff;
data[length+1] = (crc >> 16) & 0xff;
@@ -116,7 +116,7 @@
static u32 dvb_dmx_crc32 (struct dvb_demux_feed *f, const u8 *src, size_t len)
{
- return (f->feed.sec.crc_val = crc32_le (f->feed.sec.crc_val, src, len));
+ return (f->feed.sec.crc_val = crc32_be (f->feed.sec.crc_val, src, len));
}
diff -Nru a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
--- a/drivers/media/dvb/frontends/tda1004x.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/dvb/frontends/tda1004x.c Sat Oct 25 11:45:09 2003
@@ -44,12 +44,12 @@
#include "dvb_frontend.h"
#include "dvb_functions.h"
-#ifndef CONFIG_TDA1004X_MC_LOCATION
-#define CONFIG_TDA1004X_MC_LOCATION "/etc/dvb/tda1004x.mc"
+#ifndef DVB_TDA1004X_FIRMWARE_FILE
+#define DVB_TDA1004X_FIRMWARE_FILE "/etc/dvb/tda1004x.mc"
#endif
static int tda1004x_debug = 0;
-static char *tda1004x_firmware = CONFIG_TDA1004X_MC_LOCATION;
+static char *tda1004x_firmware = DVB_TDA1004X_FIRMWARE_FILE;
#define TDA10045H_ADDRESS 0x08
diff -Nru a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
--- a/drivers/media/dvb/ttpci/av7110.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/dvb/ttpci/av7110.c Sat Oct 25 11:45:09 2003
@@ -4498,6 +4498,30 @@
0x1d, 0x00,
0x1e, 0x00,
+ 0x41, 0x77,
+ 0x42, 0x77,
+ 0x43, 0x77,
+ 0x44, 0x77,
+ 0x45, 0x77,
+ 0x46, 0x77,
+ 0x47, 0x77,
+ 0x48, 0x77,
+ 0x49, 0x77,
+ 0x4a, 0x77,
+ 0x4b, 0x77,
+ 0x4c, 0x77,
+ 0x4d, 0x77,
+ 0x4e, 0x77,
+ 0x4f, 0x77,
+ 0x50, 0x77,
+ 0x51, 0x77,
+ 0x52, 0x77,
+ 0x53, 0x77,
+ 0x54, 0x77,
+ 0x55, 0x77,
+ 0x56, 0x77,
+ 0x57, 0xff,
+
0xff
};
diff -Nru a/drivers/media/dvb/ttpci/av7110_firm.h b/drivers/media/dvb/ttpci/av7110_firm.h
--- a/drivers/media/dvb/ttpci/av7110_firm.h Sat Oct 25 11:45:09 2003
+++ b/drivers/media/dvb/ttpci/av7110_firm.h Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
#include
-u8 Dpram [] __initdata = {
+static u8 Dpram [] = {
0xe5, 0x9f, 0xf0, 0x1c, 0xe1, 0xb0, 0xf0, 0x0e,
0xe5, 0x9f, 0xf0, 0x18, 0xe2, 0x5e, 0xf0, 0x04,
0xe2, 0x5e, 0xf0, 0x08, 0xe1, 0xa0, 0x00, 0x00,
@@ -41,7 +41,7 @@
};
-u8 Root [] __initdata = {
+static u8 Root [] = {
0xb4, 0x90, 0x49, 0x18, 0x1c, 0x0b, 0x4a, 0x18,
0x1a, 0x50, 0x4f, 0x18, 0x1a, 0x79, 0x10, 0x8f,
0x21, 0x00, 0x2f, 0x00, 0xdd, 0x04, 0xcb, 0x10,
diff -Nru a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
--- a/drivers/media/video/bttv-driver.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/video/bttv-driver.c Sat Oct 25 11:45:09 2003
@@ -2818,6 +2818,7 @@
up(&fh->cap.lock);
return POLLERR;
}
+ fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR;
field = videobuf_next_field(&fh->cap);
if (0 != fh->cap.ops->buf_prepare(file,fh->cap.read_buf,field)) {
up(&fh->cap.lock);
diff -Nru a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
--- a/drivers/media/video/tda9840.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/video/tda9840.c Sat Oct 25 11:45:09 2003
@@ -196,6 +196,7 @@
printk("tda9840.o: not enough kernel memory.\n");
return -ENOMEM;
}
+ memset(client, 0, sizeof(struct i2c_client));
/* fill client structure */
sprintf(client->name,"tda9840 (0x%02x)", address);
@@ -258,9 +259,7 @@
}
static struct i2c_driver driver = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54)
.owner = THIS_MODULE,
-#endif
.name = "tda9840 driver",
.id = I2C_DRIVERID_TDA9840,
.flags = I2C_DF_NOTIFY,
diff -Nru a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
--- a/drivers/media/video/tea6415c.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/video/tea6415c.c Sat Oct 25 11:45:09 2003
@@ -70,6 +70,7 @@
if (0 == client) {
return -ENOMEM;
}
+ memset(client, 0, sizeof(struct i2c_client));
/* fill client structure */
sprintf(client->name,"tea6415c (0x%02x)", address);
@@ -207,9 +208,7 @@
}
static struct i2c_driver driver = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54)
.owner = THIS_MODULE,
-#endif
.name = "tea6415c driver",
.id = I2C_DRIVERID_TEA6415C,
.flags = I2C_DF_NOTIFY,
diff -Nru a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
--- a/drivers/media/video/tea6420.c Sat Oct 25 11:45:09 2003
+++ b/drivers/media/video/tea6420.c Sat Oct 25 11:45:09 2003
@@ -110,7 +110,8 @@
if (0 == client) {
return -ENOMEM;
}
-
+ memset(client, 0x0, sizeof(struct i2c_client));
+
/* fill client structure */
sprintf(client->name,"tea6420 (0x%02x)", address);
client->id = tea6420_id++;
@@ -187,9 +188,7 @@
}
static struct i2c_driver driver = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,54)
.owner = THIS_MODULE,
-#endif
.name = "tea6420 driver",
.id = I2C_DRIVERID_TEA6420,
.flags = I2C_DF_NOTIFY,
diff -Nru a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
--- a/drivers/mtd/inftlcore.c Sat Oct 25 11:45:09 2003
+++ b/drivers/mtd/inftlcore.c Sat Oct 25 11:45:09 2003
@@ -757,7 +757,7 @@
u8 eccbuf[6];
char *p, *pend;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=0x%x,block=%d,"
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=0x%x,block=%ld,"
"buffer=0x%x)\n", (int)inftl, block, (int)buffer);
/* Is block all zero? */
@@ -803,7 +803,7 @@
struct inftl_bci bci;
size_t retlen;
- DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=0x%x,block=%d,"
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=0x%x,block=%ld,"
"buffer=0x%x)\n", (int)inftl, block, (int)buffer);
while (thisEUN < inftl->nb_blocks) {
diff -Nru a/drivers/net/tg3.c b/drivers/net/tg3.c
--- a/drivers/net/tg3.c Sat Oct 25 11:45:09 2003
+++ b/drivers/net/tg3.c Sat Oct 25 11:45:09 2003
@@ -7776,6 +7776,8 @@
tg3_netif_stop(tp);
+ del_timer_sync(&tp->timer);
+
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
tg3_disable_ints(tp);
@@ -7797,6 +7799,9 @@
tg3_init_hw(tp);
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
spin_unlock(&tp->tx_lock);
spin_unlock_irq(&tp->lock);
@@ -7826,6 +7831,10 @@
spin_lock(&tp->tx_lock);
tg3_init_hw(tp);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
tg3_enable_ints(tp);
spin_unlock(&tp->tx_lock);
diff -Nru a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
--- a/drivers/net/wireless/arlan-proc.c Sat Oct 25 11:45:09 2003
+++ b/drivers/net/wireless/arlan-proc.c Sat Oct 25 11:45:09 2003
@@ -1229,7 +1229,7 @@
//};
-
+#ifdef CONFIG_PROC_FS
static struct ctl_table_header *arlan_device_sysctl_header;
int __init init_arlan_proc(void)
@@ -1254,3 +1254,4 @@
arlan_device_sysctl_header = NULL;
}
+#endif
diff -Nru a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
--- a/drivers/parport/parport_pc.c Sat Oct 25 11:45:10 2003
+++ b/drivers/parport/parport_pc.c Sat Oct 25 11:45:10 2003
@@ -2358,7 +2358,11 @@
release_region(base_hi, 3);
ECR_res = NULL;
}
-
+ /* Likewise for EPP ports */
+ if (EPP_res && (p->modes & PARPORT_MODE_EPP) == 0) {
+ release_region(base+3, 5);
+ EPP_res = NULL;
+ }
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_pc_interrupt,
0, p->name, p)) {
diff -Nru a/drivers/pci/quirks.c b/drivers/pci/quirks.c
--- a/drivers/pci/quirks.c Sat Oct 25 11:45:09 2003
+++ b/drivers/pci/quirks.c Sat Oct 25 11:45:09 2003
@@ -277,6 +277,22 @@
}
/*
+ * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
+ * 0x40 (128 bytes of ACPI, GPIO & TCO registers)
+ * 0x58 (64 bytes of GPIO I/O space)
+ */
+static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
+{
+ u32 region;
+
+ pci_read_config_dword(dev, 0x40, &region);
+ quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES);
+
+ pci_read_config_dword(dev, 0x58, &region);
+ quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1);
+}
+
+/*
* VIA ACPI: One IO region pointed to by longword at
* 0x48 or 0x20 (256 bytes of ACPI registers)
*/
@@ -748,6 +764,60 @@
sis_96x_compatible = 1;
}
+#ifdef CONFIG_SCSI_SATA
+static void __init quirk_intel_ide_combined(struct pci_dev *pdev)
+{
+ u8 prog, comb, tmp;
+
+ /*
+ * Narrow down to Intel SATA PCI devices.
+ */
+ switch (pdev->device) {
+ /* PCI ids taken from drivers/scsi/ata_piix.c */
+ case 0x24d1:
+ case 0x24df:
+ case 0x25a3:
+ case 0x25b0:
+ break;
+ default:
+ /* we do not handle this PCI device */
+ return;
+ }
+
+ /*
+ * Read combined mode register.
+ */
+ pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */
+ tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */
+ if (tmp == 0x4) /* bits 10x */
+ comb = (1 << 0); /* SATA port 0, PATA port 1 */
+ else if (tmp == 0x6) /* bits 11x */
+ comb = (1 << 2); /* PATA port 0, SATA port 1 */
+ else
+ return; /* not in combined mode */
+
+ /*
+ * Read programming interface register.
+ * (Tells us if it's legacy or native mode)
+ */
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+
+ /* if SATA port is in native mode, we're ok. */
+ if (prog & comb)
+ return;
+
+ /* SATA port is in legacy mode. Reserve port so that
+ * IDE driver does not attempt to use it. If request_region
+ * fails, it will be obvious at boot time, so we don't bother
+ * checking return values.
+ */
+ if (comb == (1 << 0))
+ request_region(0x1f0, 8, "libata"); /* port 0 */
+ else
+ request_region(0x170, 8, "libata"); /* port 1 */
+}
+#endif /* CONFIG_SCSI_SATA */
+
/*
* The main table of quirks.
*
@@ -804,6 +874,7 @@
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_2, quirk_piix3_usb },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_2, quirk_piix3_usb },
@@ -850,6 +921,14 @@
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc },
+
+#ifdef CONFIG_SCSI_SATA
+ /* Fixup BIOSes that configure Parallel ATA (PATA / IDE) and
+ * Serial ATA (SATA) into the same PCI ID.
+ */
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ quirk_intel_ide_combined },
+#endif /* CONFIG_SCSI_SATA */
{ 0 }
};
diff -Nru a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
--- a/drivers/pcmcia/yenta_socket.c Sat Oct 25 11:45:09 2003
+++ b/drivers/pcmcia/yenta_socket.c Sat Oct 25 11:45:09 2003
@@ -509,6 +509,10 @@
#define BRIDGE_IO_MAX 256
#define BRIDGE_IO_MIN 32
+#ifndef PCIBIOS_MIN_CARDBUS_IO
+#define PCIBIOS_MIN_CARDBUS_IO PCIBIOS_MIN_IO
+#endif
+
static void yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type)
{
struct pci_bus *bus;
@@ -551,7 +555,7 @@
align = 1024;
size = BRIDGE_IO_MAX;
min = BRIDGE_IO_MIN;
- start = PCIBIOS_MIN_IO;
+ start = PCIBIOS_MIN_CARDBUS_IO;
end = ~0U;
} else {
unsigned long avail = root->end - root->start;
diff -Nru a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
--- a/drivers/scsi/Kconfig Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/Kconfig Sat Oct 25 11:45:09 2003
@@ -403,6 +403,58 @@
To compile this driver as a module, choose M here: the
module will be called megaraid.
+config SCSI_SATA
+ bool "Serial ATA (SATA) support"
+ depends on SCSI && EXPERIMENTAL
+ help
+ This driver family supports Serial ATA host controllers
+ and devices.
+
+ If unsure, say N.
+
+config SCSI_SATA_SVW
+ tristate "ServerWorks Frodo / Apple K2 SATA support (EXPERIMENTAL)"
+ depends on SCSI_SATA && PCI && EXPERIMENTAL
+ help
+ This option enables support for Broadcom/Serverworks/Apple K2
+ SATA support.
+
+ If unsure, say N.
+
+config SCSI_ATA_PIIX
+ tristate "Intel PIIX/ICH SATA support"
+ depends on SCSI_SATA && PCI
+ help
+ This option enables support for ICH5 Serial ATA.
+ If PATA support was enabled previously, this enables
+ support for select Intel PIIX/ICH PATA host controllers.
+
+ If unsure, say N.
+
+config SCSI_SATA_PROMISE
+ tristate "Promise SATA support"
+ depends on SCSI_SATA && PCI && EXPERIMENTAL
+ help
+ This option enables support for Promise Serial ATA.
+
+ If unsure, say N.
+
+config SCSI_SATA_SIL
+ tristate "Silicon Image SATA support"
+ depends on SCSI_SATA && PCI && BROKEN
+ help
+ This option enables support for Silicon Image Serial ATA.
+
+ If unsure, say N.
+
+config SCSI_SATA_VIA
+ tristate "VIA SATA support"
+ depends on SCSI_SATA && PCI && EXPERIMENTAL
+ help
+ This option enables support for VIA Serial ATA.
+
+ If unsure, say N.
+
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
depends on (PCI || ISA) && SCSI
diff -Nru a/drivers/scsi/Makefile b/drivers/scsi/Makefile
--- a/drivers/scsi/Makefile Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/Makefile Sat Oct 25 11:45:09 2003
@@ -112,6 +112,11 @@
obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o
obj-$(CONFIG_SCSI_LASI700) += lasi700.o 53c700.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
+obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o
+obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
+obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o
+obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o
+obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o
obj-$(CONFIG_ARM) += arm/
@@ -146,6 +151,7 @@
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \
cpqfcTSworker.o cpqfcTStrigger.o
+libata-objs := libata-core.o libata-scsi.o
# Files generated that shall be removed upon make clean
clean-files := 53c7xx_d.h 53c700_d.h \
diff -Nru a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
--- a/drivers/scsi/aacraid/aacraid.h Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/aacraid/aacraid.h Sat Oct 25 11:45:09 2003
@@ -14,6 +14,8 @@
#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
#define AAC_MAX_LUN (8)
+#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
+
/*
* These macros convert from physical channels to virtual channels
*/
diff -Nru a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
--- a/drivers/scsi/aacraid/comminit.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/aacraid/comminit.c Sat Oct 25 11:45:09 2003
@@ -92,7 +92,21 @@
init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
init->AdapterFibsSize = cpu_to_le32(fibsize);
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
- init->HostPhysMemPages = cpu_to_le32(num_physpages); // number of 4k pages of host physical memory
+ /*
+ * number of 4k pages of host physical memory. The aacraid fw needs
+ * this number to be less than 4gb worth of pages. num_physpages is in
+ * system page units. New firmware doesn't have any issues with the
+ * mapping system, but older Firmware did, and had *troubles* dealing
+ * with the math overloading past 32 bits, thus we must limit this
+ * field.
+ */
+ if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
+ init->HostPhysMemPages =
+ cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
+ } else {
+ init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+ }
+
/*
* Increment the base address by the amount already used
diff -Nru a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/ata_piix.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,590 @@
+/*
+
+ ata_piix.c - Intel PATA/SATA controllers
+
+
+ Copyright 2003 Red Hat Inc
+ Copyright 2003 Jeff Garzik
+
+
+ Copyright header from piix.c:
+
+ Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ Copyright (C) 1998-2000 Andre Hedrick
+ Copyright (C) 2003 Red Hat Inc
+
+ May be copied or modified under the terms of the GNU General Public License
+
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+
+#define DRV_NAME "ata_piix"
+#define DRV_VERSION "0.95"
+
+enum {
+ PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
+ ICH5_PCS = 0x92, /* port control and status */
+
+ PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */
+
+ PIIX_COMB_PRI = (1 << 0), /* combined mode, PATA primary */
+ PIIX_COMB_SEC = (1 << 1), /* combined mode, PATA secondary */
+
+ PIIX_80C_PRI = (1 << 5) | (1 << 4),
+ PIIX_80C_SEC = (1 << 7) | (1 << 6),
+
+ ich5_pata = 0,
+ ich5_sata = 1,
+ piix4_pata = 2,
+};
+
+static int piix_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static void piix_pata_phy_reset(struct ata_port *ap);
+static void piix_sata_phy_reset(struct ata_port *ap);
+static void piix_sata_port_disable(struct ata_port *ap);
+static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio);
+static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma);
+
+static unsigned int in_module_init = 1;
+
+static struct pci_device_id piix_pci_tbl[] = {
+#ifdef ATA_ENABLE_PATA
+ { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
+ { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
+ { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
+#endif
+
+ { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver piix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static Scsi_Host_Template piix_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = ATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+};
+
+static struct ata_port_operations piix_pata_ops = {
+ .port_disable = ata_port_disable,
+ .set_piomode = piix_set_piomode,
+ .set_udmamode = piix_set_udmamode,
+
+ .tf_load = ata_tf_load_pio,
+ .tf_read = ata_tf_read_pio,
+ .check_status = ata_check_status_pio,
+ .exec_command = ata_exec_command_pio,
+
+ .phy_reset = piix_pata_phy_reset,
+ .phy_config = pata_phy_config,
+
+ .bmdma_start = ata_bmdma_start_pio,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = ata_eng_timeout,
+
+ .irq_handler = ata_interrupt,
+};
+
+static struct ata_port_operations piix_sata_ops = {
+ .port_disable = piix_sata_port_disable,
+ .set_piomode = piix_set_piomode,
+ .set_udmamode = piix_set_udmamode,
+
+ .tf_load = ata_tf_load_pio,
+ .tf_read = ata_tf_read_pio,
+ .check_status = ata_check_status_pio,
+ .exec_command = ata_exec_command_pio,
+
+ .phy_reset = piix_sata_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+
+ .bmdma_start = ata_bmdma_start_pio,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = ata_eng_timeout,
+
+ .irq_handler = ata_interrupt,
+};
+
+static struct ata_port_info piix_port_info[] = {
+ /* ich5_pata */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */
+ .port_ops = &piix_pata_ops,
+ },
+
+ /* ich5_sata */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+ ATA_FLAG_SRST,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &piix_sata_ops,
+ },
+
+ /* piix4_pata */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */
+ .port_ops = &piix_pata_ops,
+ },
+};
+
+static struct pci_bits piix_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+};
+
+MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
+
+/**
+ * piix_pata_cbl_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from SATA PCI device's PCI config
+ * register. This register is normally set by firmware (BIOS).
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static void piix_pata_cbl_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = ap->host_set->pdev;
+ u8 tmp, mask;
+
+ /* no 80c support in host controller? */
+ if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0)
+ goto cbl40;
+
+ /* check BIOS cable detect results */
+ mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
+ pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
+ if ((tmp & mask) == 0)
+ goto cbl40;
+
+ ap->cbl = ATA_CBL_PATA80;
+ return;
+
+cbl40:
+ ap->cbl = ATA_CBL_PATA40;
+ ap->udma_mask &= ATA_UDMA_MASK_40C;
+}
+
+/**
+ * piix_pata_phy_reset - Probe specified port on PATA host controller
+ * @ap: Port to probe
+ *
+ * Probe PATA phy.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_pata_phy_reset(struct ata_port *ap)
+{
+ if (!pci_test_config_bits(ap->host_set->pdev,
+ &piix_enable_bits[ap->port_no])) {
+ ata_port_disable(ap);
+ printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+ return;
+ }
+
+ piix_pata_cbl_detect(ap);
+
+ ata_port_probe(ap);
+
+ ata_bus_reset(ap);
+}
+
+/**
+ * piix_pcs_probe - Probe SATA port configuration and status register
+ * @ap: Port to probe
+ * @have_port: (output) Non-zero if SATA port is enabled
+ * @have_device: (output) Non-zero if SATA phy indicates device present
+ *
+ * Reads SATA PCI device's PCI config register Port Configuration
+ * and Status (PCS) to determine port and device availability.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static void piix_pcs_probe (struct ata_port *ap, unsigned int *have_port,
+ unsigned int *have_device)
+{
+ struct pci_dev *pdev = ap->host_set->pdev;
+ u16 pcs;
+
+ pci_read_config_word(pdev, ICH5_PCS, &pcs);
+
+ /* is SATA port enabled? */
+ if (pcs & (1 << ap->port_no)) {
+ *have_port = 1;
+
+ if (pcs & (1 << (ap->port_no + 4)))
+ *have_device = 1;
+ }
+}
+
+/**
+ * piix_pcs_disable - Disable SATA port
+ * @ap: Port to disable
+ *
+ * Disable SATA phy for specified port.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static void piix_pcs_disable (struct ata_port *ap)
+{
+ struct pci_dev *pdev = ap->host_set->pdev;
+ u16 pcs;
+
+ pci_read_config_word(pdev, ICH5_PCS, &pcs);
+
+ if (pcs & (1 << ap->port_no)) {
+ pcs &= ~(1 << ap->port_no);
+ pci_write_config_word(pdev, ICH5_PCS, pcs);
+ }
+}
+
+/**
+ * piix_sata_phy_reset - Probe specified port on SATA host controller
+ * @ap: Port to probe
+ *
+ * Probe SATA phy.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_sata_phy_reset(struct ata_port *ap)
+{
+ unsigned int have_port = 0, have_dev = 0;
+
+ if (!pci_test_config_bits(ap->host_set->pdev,
+ &piix_enable_bits[ap->port_no])) {
+ ata_port_disable(ap);
+ printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+ return;
+ }
+
+ piix_pcs_probe(ap, &have_port, &have_dev);
+
+ /* if port not enabled, exit */
+ if (!have_port) {
+ ata_port_disable(ap);
+ printk(KERN_INFO "ata%u: SATA port disabled. ignoring.\n",
+ ap->id);
+ return;
+ }
+
+ /* if port enabled but no device, disable port and exit */
+ if (!have_dev) {
+ piix_sata_port_disable(ap);
+ printk(KERN_INFO "ata%u: SATA port has no device. disabling.\n",
+ ap->id);
+ return;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+
+ ata_port_probe(ap);
+
+ ata_bus_reset(ap);
+}
+
+/**
+ * piix_sata_port_disable - Disable SATA port
+ * @ap: Port to disable.
+ *
+ * Disable SATA port.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_sata_port_disable(struct ata_port *ap)
+{
+ ata_port_disable(ap);
+ piix_pcs_disable(ap);
+}
+
+/**
+ * piix_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose PIO timings we are setting
+ * @pio: PIO mode, 0 - 4
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio)
+{
+ struct pci_dev *dev = ap->host_set->pdev;
+ unsigned int is_slave = (adev->flags & ATA_DFLAG_MASTER) ? 0 : 1;
+ unsigned int master_port= ap->port_no ? 0x42 : 0x40;
+ unsigned int slave_port = 0x44;
+ u16 master_data;
+ u8 slave_data;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, master_port, &master_data);
+ if (is_slave) {
+ master_data |= 0x4000;
+ /* enable PPE, IE and TIME */
+ master_data |= 0x0070;
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ slave_data |=
+ (timings[pio][0] << 2) |
+ (timings[pio][1] << (ap->port_no ? 4 : 0));
+ } else {
+ master_data &= 0xccf8;
+ /* enable PPE, IE and TIME */
+ master_data |= 0x0007;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+}
+
+/**
+ * piix_set_udmamode - Initialize host controller PATA UDMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose UDMA timings we are setting
+ * @udma: udma mode, 0 - 6
+ *
+ * Set UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma)
+{
+ struct pci_dev *dev = ap->host_set->pdev;
+ u8 maslave = ap->port_no ? 0x42 : 0x40;
+ u8 speed = udma;
+ unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno;
+ int a_speed = 3 << (drive_dn * 4);
+ int u_flag = 1 << drive_dn;
+ int v_flag = 0x01 << drive_dn;
+ int w_flag = 0x10 << drive_dn;
+ int u_speed = 0;
+ int sitre;
+ u16 reg4042, reg44, reg48, reg4a, reg54;
+ u8 reg55;
+
+ pci_read_config_word(dev, maslave, &reg4042);
+ DPRINTK("reg4042 = 0x%04x\n", reg4042);
+ sitre = (reg4042 & 0x4000) ? 1 : 0;
+ pci_read_config_word(dev, 0x44, &reg44);
+ pci_read_config_word(dev, 0x48, &reg48);
+ pci_read_config_word(dev, 0x4a, &reg4a);
+ pci_read_config_word(dev, 0x54, &reg54);
+ pci_read_config_byte(dev, 0x55, &reg55);
+
+ switch(speed) {
+ case XFER_UDMA_4:
+ case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
+ case XFER_UDMA_6:
+ case XFER_UDMA_5:
+ case XFER_UDMA_3:
+ case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
+ case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (!(reg48 & u_flag))
+ pci_write_config_word(dev, 0x48, reg48|u_flag);
+ if (speed == XFER_UDMA_5) {
+ pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
+ } else {
+ pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+ }
+ if (!(reg4a & u_speed)) {
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ pci_write_config_word(dev, 0x4a, reg4a|u_speed);
+ }
+ if (speed > XFER_UDMA_2) {
+ if (!(reg54 & v_flag)) {
+ pci_write_config_word(dev, 0x54, reg54|v_flag);
+ }
+ } else {
+ pci_write_config_word(dev, 0x54, reg54 & ~v_flag);
+ }
+}
+
+/**
+ * piix_probe_combined - Determine if PATA and SATA are combined
+ * @pdev: PCI device to examine
+ * @mask: (output) zero, %PIIX_COMB_PRI or %PIIX_COMB_SEC
+ *
+ * Determine if BIOS has secretly stuffed a PATA port into our
+ * otherwise-beautiful SATA PCI device.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ */
+static void piix_probe_combined (struct pci_dev *pdev, unsigned int *mask)
+{
+ u8 tmp;
+
+ pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */
+ tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */
+
+ /* backwards from what one might expect */
+ if (tmp == 0x4) /* bits 10x */
+ *mask |= PIIX_COMB_SEC;
+ if (tmp == 0x6) /* bits 11x */
+ *mask |= PIIX_COMB_PRI;
+}
+
+/**
+ * piix_init_one - Register PIIX ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in piix_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_port_info *port_info[2];
+ unsigned int combined = 0, n_ports = 1;
+ unsigned int pata_comb = 0, sata_comb = 0;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ /* no hotplugging support (FIXME) */
+ if (!in_module_init)
+ return -ENODEV;
+
+ port_info[0] = &piix_port_info[ent->driver_data];
+ port_info[1] = NULL;
+ if (port_info[0]->host_flags & PIIX_FLAG_COMBINED)
+ piix_probe_combined(pdev, &combined);
+
+ if (combined & PIIX_COMB_PRI)
+ sata_comb = 1;
+ else if (combined & PIIX_COMB_SEC)
+ pata_comb = 1;
+
+ if (pata_comb || sata_comb) {
+ port_info[sata_comb] = &piix_port_info[ent->driver_data];
+ port_info[sata_comb]->host_flags |= ATA_FLAG_SLAVE_POSS; /* sigh */
+ port_info[pata_comb] = &piix_port_info[ich5_pata]; /*ich5-specific*/
+ n_ports++;
+
+ printk(KERN_WARNING DRV_NAME ": combined mode detected\n");
+ }
+
+ return ata_pci_init_one(pdev, port_info, n_ports);
+}
+
+/**
+ * piix_init -
+ *
+ * LOCKING:
+ *
+ * RETURNS:
+ *
+ */
+
+static int __init piix_init(void)
+{
+ int rc;
+
+ DPRINTK("pci_module_init\n");
+ rc = pci_module_init(&piix_pci_driver);
+ if (rc)
+ return rc;
+
+ in_module_init = 0;
+
+ DPRINTK("done\n");
+ return 0;
+}
+
+/**
+ * piix_exit -
+ *
+ * LOCKING:
+ *
+ */
+
+static void __exit piix_exit(void)
+{
+ pci_unregister_driver(&piix_pci_driver);
+}
+
+module_init(piix_init);
+module_exit(piix_exit);
+
diff -Nru a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
--- a/drivers/scsi/atp870u.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/atp870u.c Sat Oct 25 11:45:09 2003
@@ -2335,11 +2335,8 @@
break;
}
}
- for (h = 0; h < MAX_ATP; h++) {
+ for (h = 0; h < card; h++) {
struct atp_unit tmp, *dev;
- if (pdev[h] == NULL) {
- return count;
- }
/* Found an atp870u/w. */
base_io = pci_resource_start(pdev[h], 0);
diff -Nru a/drivers/scsi/constants.c b/drivers/scsi/constants.c
--- a/drivers/scsi/constants.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/constants.c Sat Oct 25 11:45:09 2003
@@ -62,42 +62,54 @@
static const char *group_2_commands[] = {
-/* 40-41 */ "Change Definition", "Write Same",
-/* 42-48 */ "Read sub-channel", "Read TOC", "Read header",
- "Play audio (10)", unknown, "Play audio msf",
- "Play audio track/index",
-/* 49-4f */ "Play track relative (10)", unknown, "Pause/resume",
- "Log Select", "Log Sense", unknown, unknown,
-/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
-/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
-/* 5c-5f */ unknown, unknown, unknown,
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ "Read sub-channel", "Read TOC", "Read header",
+ "Play audio (10)", "Get configuration", "Play audio msf",
+ "Play audio track/index",
+/* 49-4f */ "Play track relative (10)", "Get event status notification",
+ "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
+ unknown,
+/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
+ "Reserve track", "Send OPC onfo", "Mode Select (10)",
+/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue",
+ "Mode Sense (10)", "Close track/session",
+/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
+ "Persistent reserve out",
};
/* The following are 16 byte commands in group 4 */
static const char *group_4_commands[] = {
-/* 80-84 */ unknown, unknown, unknown, unknown, unknown,
-/* 85-89 */ "Memory Export In (16)", unknown, unknown, unknown,
- "Memory Export Out (16)",
-/* 8a-8f */ unknown, unknown, unknown, unknown, unknown, unknown,
-/* 90-94 */ unknown, unknown, unknown, unknown, unknown,
+/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy",
+ "Receive copy results",
+/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out",
+ "Read (16)", "Memory Export Out (16)",
+/* 8a-8f */ "Write (16)", unknown, "Read attributes", "Write attributes",
+ "Write and verify (16)", "Verify (16)",
+/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)",
+ "Lock/unlock cache (16)", "Write same (16)", unknown,
/* 95-99 */ unknown, unknown, unknown, unknown, unknown,
-/* 9a-9f */ unknown, unknown, unknown, unknown, unknown, unknown,
+/* 9a-9f */ unknown, unknown, unknown, unknown, "Service action in",
+ "Service action out",
};
-
/* The following are 12 byte commands in group 5 */
static const char *group_5_commands[] = {
-/* a0-a5 */ unknown, unknown, unknown, unknown, unknown,
- "Move medium/play audio(12)",
-/* a6-a9 */ "Exchange medium", unknown, "Read(12)", "Play track relative(12)",
-/* aa-ae */ "Write(12)", unknown, "Erase(12)", unknown,
- "Write and verify(12)",
+/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance (in)",
+ "Maintenance (out)", "Move medium/play audio(12)",
+/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
+ "Play track relative(12)",
+/* aa-ae */ "Write(12)", unknown, "Erase(12), Get Performance",
+ "Read DVD structure", "Write and verify(12)",
/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
-/* b2-b4 */ "Search data low(12)", "Set limits(12)", unknown,
-/* b5-b6 */ "Request volume element address", "Send volume tag",
-/* b7-b9 */ "Read defect data(12)", "Read element status", unknown,
-/* ba-bf */ unknown, unknown, unknown, unknown, unknown, unknown,
+/* b2-b4 */ "Search data low(12)", "Set limits(12)",
+ "Read element status attached",
+/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming",
+/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
+/* ba-bc */ "Redundancy group (in), Scan",
+ "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
+/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
+ "Volume set (out), Send DVD structure",
};
diff -Nru a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/libata-core.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,3292 @@
+/*
+ libata-core.c - helper library for ATA
+
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+#include
+#include
+
+#include "libata.h"
+
+static void atapi_cdb_send(struct ata_port *ap);
+static unsigned int ata_busy_sleep (struct ata_port *ap,
+ unsigned long tmout_pat,
+ unsigned long tmout);
+static void __ata_dev_select (struct ata_port *ap, unsigned int device);
+static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append);
+static void ata_dma_complete(struct ata_port *ap, u8 host_stat,
+ unsigned int done_late);
+static void ata_host_set_pio(struct ata_port *ap);
+static void ata_host_set_udma(struct ata_port *ap);
+static void ata_dev_set_pio(struct ata_port *ap, unsigned int device);
+static void ata_dev_set_udma(struct ata_port *ap, unsigned int device);
+
+static unsigned int ata_unique_id = 1;
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Library module for ATA devices");
+MODULE_LICENSE("GPL");
+
+static const char * thr_state_name[] = {
+ "THR_UNKNOWN",
+ "THR_PORT_RESET",
+ "THR_AWAIT_DEATH",
+ "THR_PROBE_FAILED",
+ "THR_IDLE",
+ "THR_PROBE_SUCCESS",
+ "THR_PROBE_START",
+ "THR_PIO_POLL",
+ "THR_PIO_TMOUT",
+ "THR_PIO",
+ "THR_PIO_LAST",
+ "THR_PIO_LAST_POLL",
+ "THR_PIO_ERR",
+ "THR_PACKET",
+};
+
+/**
+ * ata_thr_state_name - convert thread state enum to string
+ * @thr_state: thread state to be converted to string
+ *
+ * Converts the specified thread state id to a constant C string.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * The THR_xxx-prefixed string naming the specified thread
+ * state id, or the string "".
+ */
+
+static const char *ata_thr_state_name(unsigned int thr_state)
+{
+ if (thr_state < ARRAY_SIZE(thr_state_name))
+ return thr_state_name[thr_state];
+ return "";
+}
+
+/**
+ * msleep - sleep for a number of milliseconds
+ * @msecs: number of milliseconds to sleep
+ *
+ * Issues schedule_timeout call for the specified number
+ * of milliseconds.
+ *
+ * LOCKING:
+ * None.
+ */
+
+static void msleep(unsigned long msecs)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(msecs));
+}
+
+/**
+ * ata_tf_load_pio - send taskfile registers to host controller
+ * @ioaddr: set of IO ports to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller using PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ outb(tf->ctl, ioaddr->ctl_addr);
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ outb(tf->hob_feature, ioaddr->error_addr);
+ outb(tf->hob_nsect, ioaddr->nsect_addr);
+ outb(tf->hob_lbal, ioaddr->lbal_addr);
+ outb(tf->hob_lbam, ioaddr->lbam_addr);
+ outb(tf->hob_lbah, ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ outb(tf->feature, ioaddr->error_addr);
+ outb(tf->nsect, ioaddr->nsect_addr);
+ outb(tf->lbal, ioaddr->lbal_addr);
+ outb(tf->lbam, ioaddr->lbam_addr);
+ outb(tf->lbah, ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ outb(tf->device, ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * ata_tf_load_mmio - send taskfile registers to host controller
+ * @ioaddr: set of IO ports to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ writeb(tf->ctl, ap->ioaddr.ctl_addr);
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ writeb(tf->hob_feature, (void *) ioaddr->error_addr);
+ writeb(tf->hob_nsect, (void *) ioaddr->nsect_addr);
+ writeb(tf->hob_lbal, (void *) ioaddr->lbal_addr);
+ writeb(tf->hob_lbam, (void *) ioaddr->lbam_addr);
+ writeb(tf->hob_lbah, (void *) ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ writeb(tf->feature, (void *) ioaddr->error_addr);
+ writeb(tf->nsect, (void *) ioaddr->nsect_addr);
+ writeb(tf->lbal, (void *) ioaddr->lbal_addr);
+ writeb(tf->lbam, (void *) ioaddr->lbam_addr);
+ writeb(tf->lbah, (void *) ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ writeb(tf->device, (void *) ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * ata_exec_command_pio - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues PIO write to ATA command register, with proper
+ * synchronization with interrupt handler / other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+ outb(tf->command, ap->ioaddr.cmdstat_addr);
+ ata_pause(ap);
+}
+
+
+/**
+ * ata_exec_command_mmio - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues MMIO write to ATA command register, with proper
+ * synchronization with interrupt handler / other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+ writeb(tf->command, (void *) ap->ioaddr.cmdstat_addr);
+ ata_pause(ap);
+}
+
+/**
+ * ata_exec - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues PIO write to ATA command register, with proper
+ * synchronization with interrupt handler / other threads.
+ *
+ * LOCKING:
+ * Obtains host_set lock.
+ */
+
+static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ unsigned long flags;
+
+ DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->ops->exec_command(ap, tf);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
+ * ata_tf_to_host - issue ATA taskfile to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA taskfile register set to ATA host controller,
+ * via PIO, with proper synchronization with interrupt handler and
+ * other threads.
+ *
+ * LOCKING:
+ * Obtains host_set lock.
+ */
+
+static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ init_MUTEX_LOCKED(&ap->sem);
+
+ ap->ops->tf_load(ap, tf);
+
+ ata_exec(ap, tf);
+}
+
+/**
+ * ata_tf_to_host_nolock - issue ATA taskfile to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA taskfile register set to ATA host controller,
+ * via PIO, with proper synchronization with interrupt handler and
+ * other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ init_MUTEX_LOCKED(&ap->sem);
+
+ ap->ops->tf_load(ap, tf);
+ ap->ops->exec_command(ap, tf);
+}
+
+/**
+ * ata_tf_read_pio - input device's ATA taskfile shadow registers
+ * @ioaddr: set of IO ports from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Reads ATA taskfile registers for currently-selected device
+ * into @tf via PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->nsect = inb(ioaddr->nsect_addr);
+ tf->lbal = inb(ioaddr->lbal_addr);
+ tf->lbam = inb(ioaddr->lbam_addr);
+ tf->lbah = inb(ioaddr->lbah_addr);
+ tf->device = inb(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = inb(ioaddr->error_addr);
+ tf->hob_nsect = inb(ioaddr->nsect_addr);
+ tf->hob_lbal = inb(ioaddr->lbal_addr);
+ tf->hob_lbam = inb(ioaddr->lbam_addr);
+ tf->hob_lbah = inb(ioaddr->lbah_addr);
+ }
+}
+
+/**
+ * ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ * @ioaddr: set of IO ports from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Reads ATA taskfile registers for currently-selected device
+ * into @tf via MMIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->nsect = readb((void *)ioaddr->nsect_addr);
+ tf->lbal = readb((void *)ioaddr->lbal_addr);
+ tf->lbam = readb((void *)ioaddr->lbam_addr);
+ tf->lbah = readb((void *)ioaddr->lbah_addr);
+ tf->device = readb((void *)ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ writeb(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
+ tf->hob_feature = readb((void *)ioaddr->error_addr);
+ tf->hob_nsect = readb((void *)ioaddr->nsect_addr);
+ tf->hob_lbal = readb((void *)ioaddr->lbal_addr);
+ tf->hob_lbam = readb((void *)ioaddr->lbam_addr);
+ tf->hob_lbah = readb((void *)ioaddr->lbah_addr);
+ }
+}
+
+/**
+ * ata_check_status_pio - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Reads ATA taskfile status register for currently-selected device
+ * via PIO and return it's value. This also clears pending interrupts
+ * from this device
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+u8 ata_check_status_pio(struct ata_port *ap)
+{
+ return inb(ap->ioaddr.cmdstat_addr);
+}
+
+/**
+ * ata_check_status_mmio - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Reads ATA taskfile status register for currently-selected device
+ * via MMIO and return it's value. This also clears pending interrupts
+ * from this device
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+u8 ata_check_status_mmio(struct ata_port *ap)
+{
+ return readb((void *) ap->ioaddr.cmdstat_addr);
+}
+
+static const char * udma_str[] = {
+ "UDMA/16",
+ "UDMA/25",
+ "UDMA/33",
+ "UDMA/44",
+ "UDMA/66",
+ "UDMA/100",
+ "UDMA/133",
+ "UDMA7",
+};
+
+/**
+ * ata_udma_string - convert UDMA bit offset to string
+ * @udma_mask: mask of bits supported; only highest bit counts.
+ *
+ * Determine string which represents the highest speed
+ * (highest bit in @udma_mask).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Constant C string representing highest speed listed in
+ * @udma_mask, or the constant C string "".
+ */
+
+static const char *ata_udma_string(unsigned int udma_mask)
+{
+ int i;
+
+ for (i = 7; i >= 0; i--) {
+ if (udma_mask & (1 << i))
+ return udma_str[i];
+ }
+
+ return "";
+}
+
+/**
+ *	ata_pio_devchk - PIO device presence check
+ *	@ap: port on which the check is performed
+ *	@device: device (0 or 1) to check
+ *
+ *	Selects @device, then repeatedly writes a 0x55/0xaa test
+ *	pattern to the nsect and lbal shadow registers and reads it
+ *	back.  A device that is present latches and returns the
+ *	last pattern written.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if a device responded to the pattern, 0 otherwise.
+ */
+
+static unsigned int ata_pio_devchk(struct ata_port *ap,
+				   unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	__ata_dev_select(ap, device);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	outb(0xaa, ioaddr->nsect_addr);
+	outb(0x55, ioaddr->lbal_addr);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	nsect = inb(ioaddr->nsect_addr);
+	lbal = inb(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_mmio_devchk - MMIO device presence check
+ *	@ap: port on which the check is performed
+ *	@device: device (0 or 1) to check
+ *
+ *	MMIO counterpart of ata_pio_devchk(): selects @device, writes
+ *	a 0x55/0xaa test pattern to the nsect and lbal shadow
+ *	registers and reads it back to detect a responding device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if a device responded to the pattern, 0 otherwise.
+ */
+
+static unsigned int ata_mmio_devchk(struct ata_port *ap,
+				    unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	__ata_dev_select(ap, device);
+
+	writeb(0x55, (void *) ioaddr->nsect_addr);
+	writeb(0xaa, (void *) ioaddr->lbal_addr);
+
+	writeb(0xaa, (void *) ioaddr->nsect_addr);
+	writeb(0x55, (void *) ioaddr->lbal_addr);
+
+	writeb(0x55, (void *) ioaddr->nsect_addr);
+	writeb(0xaa, (void *) ioaddr->lbal_addr);
+
+	nsect = readb((void *) ioaddr->nsect_addr);
+	lbal = readb((void *) ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_dev_devchk - device presence check, PIO or MMIO as appropriate
+ *	@ap: port on which the check is performed
+ *	@device: device (0 or 1) to check
+ *
+ *	Dispatches to ata_mmio_devchk() or ata_pio_devchk() depending
+ *	on the port's ATA_FLAG_MMIO flag.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if a device responded to the test pattern, 0 otherwise.
+ */
+
+static unsigned int ata_dev_devchk(struct ata_port *ap,
+				   unsigned int device)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_mmio_devchk(ap, device);
+	return ata_pio_devchk(ap, device);
+}
+
+/**
+ * ata_dev_classify - determine device type based on ATA-spec signature
+ * @tf: ATA taskfile register set for device to be identified
+ *
+ * Determine from taskfile register contents whether a device is
+ * ATA or ATAPI, as per "Signature and persistence" section
+ * of ATA/PI spec (volume 1, sect 5.14).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
+ * the event of failure.
+ */
+
+static unsigned int ata_dev_classify(struct ata_taskfile *tf)
+{
+ /* Apple's open source Darwin code hints that some devices only
+ * put a proper signature into the LBA mid/high registers,
+ * So, we only check those. It's sufficient for uniqueness.
+ */
+
+ if (((tf->lbam == 0) && (tf->lbah == 0)) ||
+ ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
+ DPRINTK("found ATA device by sig\n");
+ return ATA_DEV_ATA;
+ }
+
+ if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
+ ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
+ DPRINTK("found ATAPI device by sig\n");
+ return ATA_DEV_ATAPI;
+ }
+
+ DPRINTK("unknown device\n");
+ return ATA_DEV_UNKNOWN;
+}
+
+/**
+ *	ata_dev_try_classify - read diagnostic result and classify device
+ *	@ap: port the device is attached to
+ *	@device: device (0 or 1) to classify
+ *	@maybe_have_dev: hint from earlier probing (currently unused here)
+ *
+ *	Selects @device, reads the error register (the diagnostic
+ *	result) and the signature taskfile.  If the device passed
+ *	diagnostics (err == 1, or err == 0x81 when checking device 0,
+ *	i.e. only the slave failed), it is classified as ATA or ATAPI
+ *	by signature via ata_dev_classify() and the result stored in
+ *	ap->device[].class; otherwise the class stays ATA_DEV_NONE.
+ *	An ATA signature with an all-zero status register is rejected.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Raw contents of the device's error register.
+ */
+
+static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device,
+			       unsigned int maybe_have_dev)
+{
+	struct ata_device *dev = &ap->device[device];
+	struct ata_taskfile tf;
+	unsigned int class;
+	u8 err;
+
+	__ata_dev_select(ap, device);
+
+	memset(&tf, 0, sizeof(tf));
+
+	err = ata_chk_err(ap);
+	ap->ops->tf_read(ap, &tf);
+
+	dev->class = ATA_DEV_NONE;
+
+	/* see if device passed diags */
+	if (err == 1)
+		/* do nothing */ ;
+	else if ((device == 0) && (err == 0x81))
+		/* do nothing */ ;
+	else
+		return err;
+
+	/* determine if device if ATA or ATAPI */
+	class = ata_dev_classify(&tf);
+	if (class == ATA_DEV_UNKNOWN)
+		return err;
+	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
+		return err;
+
+	dev->class = class;
+
+	return err;
+}
+
+/**
+ *	ata_dev_id_string - copy a string out of IDENTIFY DEVICE data
+ *	@dev: device whose ->id page is read
+ *	@s: output buffer
+ *	@ofs: starting word offset within dev->id
+ *	@len: number of output bytes to copy; must be even
+ *
+ *	IDENTIFY strings are stored two characters per 16-bit word,
+ *	high byte first; each word is unpacked accordingly.  No NUL
+ *	terminator is appended.
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	The last byte copied (low byte of the final word).
+ */
+
+unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s,
+			       unsigned int ofs, unsigned int len)
+{
+	unsigned int c, ret = 0;
+
+	while (len > 0) {
+		c = dev->id[ofs] >> 8;
+		*s = c;
+		s++;
+
+		ret = c = dev->id[ofs] & 0xff;
+		*s = c;
+		s++;
+
+		ofs++;
+		len -= 2;
+	}
+
+	return ret;
+}
+
+/**
+ *	ata_dev_parse_strings - fill vendor/product from IDENTIFY data
+ *	@dev: ATA device (must have class ATA_DEV_ATA)
+ *
+ *	Stores a fixed "ATA" vendor string and extracts the product
+ *	string from the identify page via ata_dev_id_string().
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+static void ata_dev_parse_strings(struct ata_device *dev)
+{
+	assert (dev->class == ATA_DEV_ATA);
+	memcpy(dev->vendor, "ATA     ", 8);
+
+	ata_dev_id_string(dev, dev->product, ATA_ID_PROD_OFS,
+			  sizeof(dev->product));
+}
+
+/**
+ *	__ata_dev_select - select device 0 or 1 on the ATA bus
+ *	@ap: port
+ *	@device: 0 selects the master, non-zero the slave
+ *
+ *	Writes the obsolete-bits device value (plus ATA_DEV1 for the
+ *	slave) to the device register, via PIO or MMIO depending on
+ *	ATA_FLAG_MMIO, then pauses.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void __ata_dev_select (struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	if (ap->flags & ATA_FLAG_MMIO) {
+		writeb(tmp, (void *) ap->ioaddr.device_addr);
+	} else {
+		outb(tmp, ap->ioaddr.device_addr);
+	}
+	ata_pause(ap);	/* needed; also flushes, for mmio */
+}
+
+/**
+ *	ata_dev_select - select a device, with optional idle waits
+ *	@ap: port
+ *	@device: 0 selects the master, non-zero the slave
+ *	@wait: non-zero to wait for the port to go idle before and
+ *	       after selecting
+ *	@can_sleep: non-zero if the caller may sleep; ATAPI devices
+ *	            then get an extra 150 ms settle delay after
+ *	            selection
+ *
+ *	LOCKING:
+ *	Inherited from caller (may sleep when @wait and @can_sleep
+ *	are set).
+ */
+
+void ata_dev_select(struct ata_port *ap, unsigned int device,
+		    unsigned int wait, unsigned int can_sleep)
+{
+	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
+		ap->id, device, wait);
+
+	if (wait)
+		ata_wait_idle(ap);
+
+	__ata_dev_select(ap, device);
+
+	if (wait) {
+		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
+			msleep(150);
+		ata_wait_idle(ap);
+	}
+}
+
+/**
+ *	ata_dump_id - dump selected IDENTIFY DEVICE words for debugging
+ *	@dev: device whose identify data is printed
+ *
+ *	DPRINTKs the capability/timing-related identify words
+ *	(49, 53, 63, 64, 75, 80-84, 88, 93).  Compiled away unless
+ *	debugging output is enabled.
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+static inline void ata_dump_id(struct ata_device *dev)
+{
+	DPRINTK("49==0x%04x  "
+		"53==0x%04x  "
+		"63==0x%04x  "
+		"64==0x%04x  "
+		"75==0x%04x  \n",
+		dev->id[49],
+		dev->id[53],
+		dev->id[63],
+		dev->id[64],
+		dev->id[75]);
+	DPRINTK("80==0x%04x  "
+		"81==0x%04x  "
+		"82==0x%04x  "
+		"83==0x%04x  "
+		"84==0x%04x  \n",
+		dev->id[80],
+		dev->id[81],
+		dev->id[82],
+		dev->id[83],
+		dev->id[84]);
+	DPRINTK("88==0x%04x  "
+		"93==0x%04x\n",
+		dev->id[88],
+		dev->id[93]);
+}
+
+/**
+ * ata_dev_identify - obtain IDENTIFY x DEVICE page
+ * @ap: port on which device we wish to probe resides
+ * @device: device bus address, starting at zero
+ *
+ * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
+ * command, and read back the 512-byte device information page.
+ * The device information page is fed to us via the standard
+ * PIO-IN protocol, but we hand-code it here. (TODO: investigate
+ * using standard PIO-IN paths)
+ *
+ * After reading the device information page, we use several
+ * bits of information from it to initialize data structures
+ * that will be used during the lifetime of the ata_device.
+ * Other data from the info page is used to disqualify certain
+ * older ATA devices we do not wish to support.
+ *
+ * LOCKING:
+ * Inherited from caller. Some functions called by this function
+ * obtain the host_set lock.
+ */
+
+static void ata_dev_identify(struct ata_port *ap, unsigned int device)
+{
+ struct ata_device *dev = &ap->device[device];
+ unsigned int i;
+ u16 tmp, udma_modes;
+ u8 status;
+ struct ata_taskfile tf;
+ unsigned int using_edd;
+
+ if (!ata_dev_present(dev)) {
+ DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
+ ap->id, device);
+ return;
+ }
+
+ if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
+ using_edd = 0;
+ else
+ using_edd = 1;
+
+ DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
+
+ assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
+ dev->class == ATA_DEV_NONE);
+
+ ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
+
+retry:
+ ata_tf_init(ap, &tf, device);
+ tf.ctl |= ATA_NIEN;
+ tf.protocol = ATA_PROT_PIO_READ;
+
+ if (dev->class == ATA_DEV_ATA) {
+ tf.command = ATA_CMD_ID_ATA;
+ DPRINTK("do ATA identify\n");
+ } else {
+ tf.command = ATA_CMD_ID_ATAPI;
+ DPRINTK("do ATAPI identify\n");
+ }
+
+ ata_tf_to_host(ap, &tf);
+
+ /* crazy ATAPI devices... */
+ if (dev->class == ATA_DEV_ATAPI)
+ msleep(150);
+
+ if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
+ goto err_out;
+
+ status = ata_chk_status(ap);
+ if (status & ATA_ERR) {
+ /*
+ * arg! EDD works for all test cases, but seems to return
+ * the ATA signature for some ATAPI devices. Until the
+ * reason for this is found and fixed, we fix up the mess
+ * here. If IDENTIFY DEVICE returns command aborted
+ * (as ATAPI devices do), then we issue an
+ * IDENTIFY PACKET DEVICE.
+ *
+ * ATA software reset (SRST, the default) does not appear
+ * to have this problem.
+ */
+ if ((using_edd) && (tf.command == ATA_CMD_ID_ATA)) {
+ u8 err = ata_chk_err(ap);
+ if (err & ATA_ABORTED) {
+ dev->class = ATA_DEV_ATAPI;
+ goto retry;
+ }
+ }
+ goto err_out;
+ }
+
+ /* make sure we have BSY=0, DRQ=1 */
+ if ((status & ATA_DRQ) == 0) {
+ printk(KERN_WARNING "ata%u: dev %u (ATA%s?) not returning id page (0x%x)\n",
+ ap->id, device,
+ dev->class == ATA_DEV_ATA ? "" : "PI",
+ status);
+ goto err_out;
+ }
+
+ /* read IDENTIFY [X] DEVICE page */
+ if (ap->flags & ATA_FLAG_MMIO) {
+ for (i = 0; i < ATA_ID_WORDS; i++)
+ dev->id[i] = readw((void *)ap->ioaddr.data_addr);
+ } else
+ for (i = 0; i < ATA_ID_WORDS; i++)
+ dev->id[i] = inw(ap->ioaddr.data_addr);
+
+ /* wait for host_idle */
+ status = ata_wait_idle(ap);
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ printk(KERN_WARNING "ata%u: dev %u (ATA%s?) error after id page (0x%x)\n",
+ ap->id, device,
+ dev->class == ATA_DEV_ATA ? "" : "PI",
+ status);
+ goto err_out;
+ }
+
+ ata_irq_on(ap); /* re-enable interrupts */
+
+ /* print device capabilities */
+ printk(KERN_DEBUG "ata%u: dev %u cfg "
+ "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+ ap->id, device, dev->id[49],
+ dev->id[82], dev->id[83], dev->id[84],
+ dev->id[85], dev->id[86], dev->id[87],
+ dev->id[88]);
+
+ /*
+ * common ATA, ATAPI feature tests
+ */
+
+ /* we require LBA and DMA support (bits 8 & 9 of word 49) */
+ if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) {
+ printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+ goto err_out_nosup;
+ }
+
+ /* we require UDMA support */
+ udma_modes =
+ tmp = dev->id[ATA_ID_UDMA_MODES];
+ if ((tmp & 0xff) == 0) {
+ printk(KERN_DEBUG "ata%u: no udma\n", ap->id);
+ goto err_out_nosup;
+ }
+
+ ata_dump_id(dev);
+
+ ata_dev_parse_strings(dev);
+
+ /* ATA-specific feature tests */
+ if (dev->class == ATA_DEV_ATA) {
+ if (!ata_id_is_ata(dev)) /* sanity check */
+ goto err_out_nosup;
+
+ tmp = dev->id[ATA_ID_MAJOR_VER];
+ for (i = 14; i >= 1; i--)
+ if (tmp & (1 << i))
+ break;
+
+ /* we require at least ATA-3 */
+ if (i < 3) {
+ printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
+ goto err_out_nosup;
+ }
+
+ if (ata_id_has_lba48(dev)) {
+ dev->flags |= ATA_DFLAG_LBA48;
+ dev->n_sectors = ata_id_u64(dev, 100);
+ } else {
+ dev->n_sectors = ata_id_u32(dev, 60);
+ }
+
+ ap->host->max_cmd_len = 16;
+
+ /* print device info to dmesg */
+ printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors%s\n",
+ ap->id, device,
+ ata_udma_string(udma_modes),
+ dev->n_sectors,
+ dev->flags & ATA_DFLAG_LBA48 ? " (lba48)" : "");
+ }
+
+ /* ATAPI-specific feature tests */
+ else {
+ if (ata_id_is_ata(dev)) /* sanity check */
+ goto err_out_nosup;
+
+ /* see if 16-byte commands supported */
+ tmp = dev->id[0] & 0x3;
+ if (tmp == 1)
+ ap->host->max_cmd_len = 16;
+
+ /* print device info to dmesg */
+ printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
+ ap->id, device,
+ ata_udma_string(udma_modes));
+ }
+
+ DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
+ return;
+
+err_out_nosup:
+ printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
+ ap->id, device);
+err_out:
+ ata_irq_on(ap); /* re-enable interrupts */
+ dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
+ DPRINTK("EXIT, err\n");
+}
+
+/**
+ *	ata_port_reset - reset a port and probe the attached devices
+ *	@ap: port to reset
+ *
+ *	Runs the port's phy_reset hook, identifies each device found
+ *	(ata_dev_identify()), calls the optional dev_config hook for
+ *	present devices, then runs the phy_config hook.  On success
+ *	the probe-thread state becomes THR_PROBE_SUCCESS; if no device
+ *	is found or any step disables the port, the port is disabled
+ *	and the state becomes THR_PROBE_FAILED.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_port_reset(struct ata_port *ap)
+{
+	unsigned int i, found = 0;
+
+	ap->ops->phy_reset(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		ata_dev_identify(ap, i);
+		if (ata_dev_present(&ap->device[i])) {
+			found = 1;
+			if (ap->ops->dev_config)
+				ap->ops->dev_config(ap, &ap->device[i]);
+		}
+	}
+
+	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+		goto err_out_disable;
+
+	ap->ops->phy_config(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out_disable;
+
+	ap->thr_state = THR_PROBE_SUCCESS;
+
+	return;
+
+err_out_disable:
+	ap->ops->port_disable(ap);
+err_out:
+	ap->thr_state = THR_PROBE_FAILED;
+}
+
+/**
+ *	ata_port_probe - mark port as enabled for probing
+ *	@ap: port to mark
+ *
+ *	Clears ATA_FLAG_PORT_DISABLED so that the probe path treats
+ *	the port as usable.
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+void ata_port_probe(struct ata_port *ap)
+{
+	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+}
+
+/**
+ *	sata_phy_reset - reset a SATA phy and probe for a device
+ *	@ap: port whose phy is reset
+ *
+ *	Issues a phy wake/reset through the SControl register (0x301
+ *	then 0x300), then polls SStatus for up to five seconds while
+ *	its DET field reads 1.  If a device is then present the port
+ *	is probed and an ATA bus reset performed; otherwise the port
+ *	is disabled.
+ *
+ *	LOCKING:
+ *	Inherited from caller (sleeps).
+ */
+void sata_phy_reset(struct ata_port *ap)
+{
+	u32 sstatus;
+	unsigned long timeout = jiffies + (HZ * 5);
+
+	scr_write(ap, SCR_CONTROL, 0x301);	/* issue phy wake/reset */
+	scr_read(ap, SCR_CONTROL);		/* dummy read; flush */
+	udelay(400);				/* FIXME: a guess */
+	scr_write(ap, SCR_CONTROL, 0x300);	/* issue phy wake/reset */
+
+	/* wait for phy to become ready, if necessary */
+	do {
+		msleep(200);
+		sstatus = scr_read(ap, SCR_STATUS);
+		if ((sstatus & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	/* TODO: phy layer with polling, timeouts, etc. */
+	if (sata_dev_present(ap))
+		ata_port_probe(ap);
+	else {
+		sstatus = scr_read(ap, SCR_STATUS);
+		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
+		       ap->id, sstatus);
+		ata_port_disable(ap);
+	}
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	ata_bus_reset(ap);
+}
+
+/**
+ *	ata_port_disable - take a port out of service
+ *	@ap: port to disable
+ *
+ *	Marks both devices as absent (ATA_DEV_NONE) and sets
+ *	ATA_FLAG_PORT_DISABLED so later probe/config steps skip
+ *	this port.
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+void ata_port_disable(struct ata_port *ap)
+{
+	ap->device[0].class = ATA_DEV_NONE;
+	ap->device[1].class = ATA_DEV_NONE;
+	ap->flags |= ATA_FLAG_PORT_DISABLED;
+}
+
+/**
+ *	pata_phy_config - configure transfer modes for a PATA port
+ *	@ap: port to configure
+ *
+ *	Programs host-side PIO and UDMA timing (ata_host_set_pio(),
+ *	ata_host_set_udma()), then performs per-device mode setup for
+ *	both devices: PIO when built with ATA_FORCE_PIO, UDMA
+ *	otherwise.  Bails out early if any step disables the port.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void pata_phy_config(struct ata_port *ap)
+{
+	unsigned int force_pio;
+
+	ata_host_set_pio(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	ata_host_set_udma(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+#ifdef ATA_FORCE_PIO
+	force_pio = 1;
+#else
+	force_pio = 0;
+#endif
+
+	if (force_pio) {
+		ata_dev_set_pio(ap, 0);
+		ata_dev_set_pio(ap, 1);
+
+		if (ap->flags & ATA_FLAG_PORT_DISABLED)
+			return;
+	} else {
+		ata_dev_set_udma(ap, 0);
+		ata_dev_set_udma(ap, 1);
+
+		if (ap->flags & ATA_FLAG_PORT_DISABLED)
+			return;
+	}
+
+}
+
+/**
+ *	ata_busy_sleep - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@tmout_pat: impatience timeout (jiffies); warn once it expires
+ *	@tmout: overall timeout (jiffies); give up once it expires
+ *
+ *	Polls the status register, sleeping 50 ms between checks,
+ *	until the BSY bit clears or @tmout elapses.
+ *
+ *	LOCKING:
+ *	Inherited from caller (sleeps; must not hold locks).
+ *
+ *	RETURNS:
+ *	0 when BSY cleared in time, 1 on timeout.
+ */
+
+static unsigned int ata_busy_sleep (struct ata_port *ap,
+				    unsigned long tmout_pat,
+				    unsigned long tmout)
+{
+	unsigned long timer_start, timeout;
+	u8 status;
+
+	status = ata_busy_wait(ap, ATA_BUSY, 300);
+	timer_start = jiffies;
+	timeout = timer_start + tmout_pat;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_busy_wait(ap, ATA_BUSY, 3);
+	}
+
+	if (status & ATA_BUSY)
+		printk(KERN_WARNING "ata%u is slow to respond, "
+		       "please be patient\n", ap->id);
+
+	timeout = timer_start + tmout;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_chk_status(ap);
+	}
+
+	if (status & ATA_BUSY) {
+		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
+		       ap->id, tmout / HZ);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_bus_post_reset - wait for devices to become ready after reset
+ *	@ap: port that was just reset
+ *	@devmask: bit 0 / bit 1 set for each device found earlier by
+ *	          ata_dev_devchk()
+ *
+ *	Waits for device 0's BSY to clear; for device 1, first polls
+ *	the nsect/lbal registers (up to ATA_TMOUT_BOOT) for the
+ *	0x01/0x01 post-reset signature before waiting for its BSY as
+ *	well.  Finally reselects the devices, ending on device 0.
+ *
+ *	LOCKING:
+ *	Inherited from caller (sleeps).
+ */
+static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	unsigned long timeout;
+
+	/* if device 0 was found in ata_dev_devchk, wait for its
+	 * BSY bit to clear
+	 */
+	if (dev0)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* if device 1 was found in ata_dev_devchk, wait for
+	 * register access, then wait for BSY to clear
+	 */
+	timeout = jiffies + ATA_TMOUT_BOOT;
+	while (dev1) {
+		u8 nsect, lbal;
+
+		__ata_dev_select(ap, 1);
+		if (ap->flags & ATA_FLAG_MMIO) {
+			nsect = readb((void *) ioaddr->nsect_addr);
+			lbal = readb((void *) ioaddr->lbal_addr);
+		} else {
+			nsect = inb(ioaddr->nsect_addr);
+			lbal = inb(ioaddr->lbal_addr);
+		}
+		if ((nsect == 1) && (lbal == 1))
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev1 = 0;
+			break;
+		}
+		msleep(50);	/* give drive a breather */
+	}
+	if (dev1)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* is all this really necessary? */
+	__ata_dev_select(ap, 0);
+	if (dev1)
+		__ata_dev_select(ap, 1);
+	if (dev0)
+		__ata_dev_select(ap, 0);
+}
+
+/**
+ * ata_bus_edd - reset the bus via EXECUTE DEVICE DIAGNOSTIC
+ * @ap: port to reset
+ *
+ * Issues the EDD command with interrupts disabled (nIEN), then
+ * sleeps until BSY clears or the boot timeout expires.
+ *
+ * LOCKING:
+ * Sleeps; kernel thread context — TODO confirm against callers.
+ *
+ * RETURNS:
+ * Zero on success, non-zero if the device stayed busy (timeout).
+ */
+
+static unsigned int ata_bus_edd(struct ata_port *ap)
+{
+ struct ata_taskfile tf;
+
+ /* set up execute-device-diag (bus reset) taskfile */
+ /* also, take interrupts to a known state (disabled) */
+ DPRINTK("execute-device-diag\n");
+ ata_tf_init(ap, &tf, 0);
+ tf.ctl |= ATA_NIEN;
+ tf.command = ATA_CMD_EDD;
+ tf.protocol = ATA_PROT_NODATA;
+
+ /* do bus reset */
+ ata_tf_to_host(ap, &tf);
+
+ /* spec says at least 2ms. but who knows with those
+ * crazy ATAPI devices...
+ */
+ msleep(150);
+
+ return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+}
+
+/* ata_bus_softreset - reset the bus by pulsing SRST in the control
+ * register (selects device 0 as a side effect), then wait for the
+ * devices in @devmask to come back.  Sleeps; always returns 0.
+ */
+static unsigned int ata_bus_softreset(struct ata_port *ap,
+ unsigned int devmask)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+
+ /* software reset. causes dev0 to be selected */
+ if (ap->flags & ATA_FLAG_MMIO) {
+ writeb(ap->ctl, ioaddr->ctl_addr);
+ udelay(10); /* FIXME: flush */
+ writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(10); /* FIXME: flush */
+ writeb(ap->ctl, ioaddr->ctl_addr);
+ } else {
+ outb(ap->ctl, ioaddr->ctl_addr);
+ udelay(10);
+ outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(10);
+ outb(ap->ctl, ioaddr->ctl_addr);
+ }
+
+ /* spec mandates ">= 2ms" before checking status.
+ * We wait 150ms, because that was the magic delay used for
+ * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+ * between when the ATA command register is written, and then
+ * status is checked. Because waiting for "a while" before
+ * checking status is fine, post SRST, we perform this magic
+ * delay here as well.
+ */
+ msleep(150);
+
+ ata_bus_post_reset(ap, devmask);
+
+ return 0;
+}
+
+/**
+ * ata_bus_reset - reset host port and associated ATA channel
+ * @ap: port to reset
+ *
+ * This is typically the first time we actually start issuing
+ * commands to the ATA channel. We wait for BSY to clear, then
+ * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
+ * result. Determine what devices, if any, are on the channel
+ * by looking at the device 0/1 error register. Look at the signature
+ * stored in each device's taskfile registers, to determine if
+ * the device is ATA or ATAPI.
+ *
+ * LOCKING:
+ * Inherited from caller. Some functions called by this function
+ * obtain the host_set lock.
+ *
+ * SIDE EFFECTS:
+ * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ */
+
+void ata_bus_reset(struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ u8 err;
+ unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
+
+ DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
+
+ /* set up device control */
+ if (ap->flags & ATA_FLAG_MMIO)
+ writeb(ap->ctl, ioaddr->ctl_addr);
+ else
+ outb(ap->ctl, ioaddr->ctl_addr);
+
+ /* determine if device 0/1 are present */
+ dev0 = ata_dev_devchk(ap, 0);
+ if (slave_possible)
+ dev1 = ata_dev_devchk(ap, 1);
+
+ if (dev0)
+ devmask |= (1 << 0);
+ if (dev1)
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ __ata_dev_select(ap, 0);
+
+ /* issue bus reset; SATA ports with SATA_RESET skip both methods */
+ if (ap->flags & ATA_FLAG_SRST)
+ rc = ata_bus_softreset(ap, devmask);
+ else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0)
+ rc = ata_bus_edd(ap);
+
+ if (rc)
+ goto err_out;
+
+ /*
+ * determine by signature whether we have ATA or ATAPI devices
+ */
+ /* NOTE(review): 0x81 appears to be the diagnostic code meaning
+ * "device 1 failed" — confirm against the ATA spec */
+ err = ata_dev_try_classify(ap, 0, dev0);
+ if ((slave_possible) && (err != 0x81))
+ ata_dev_try_classify(ap, 1, dev1);
+
+ /* re-enable interrupts */
+ ata_irq_on(ap);
+
+ /* is double-select really necessary? */
+ if (ap->device[1].class != ATA_DEV_NONE)
+ __ata_dev_select(ap, 1);
+ if (ap->device[0].class != ATA_DEV_NONE)
+ __ata_dev_select(ap, 0);
+
+ /* if no devices were detected, disable this port */
+ if ((ap->device[0].class == ATA_DEV_NONE) &&
+ (ap->device[1].class == ATA_DEV_NONE))
+ goto err_out;
+
+ DPRINTK("EXIT\n");
+ return;
+
+err_out:
+ printk(KERN_ERR "ata%u: disabling port\n", ap->id);
+ ap->ops->port_disable(ap);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_host_set_pio - program PIO timings for all devices on @ap
+ * @ap: port whose devices are to be configured
+ *
+ * Intersects the host's PIO capability mask with each present
+ * device's advertised modes, requires PIO3 or PIO4, and programs
+ * the fastest supported mode via the driver's set_piomode hook.
+ * Disables the port if no common PIO3/4 mode exists.
+ *
+ * LOCKING:
+ * None documented — presumably probe-time kernel thread context;
+ * TODO confirm.
+ */
+
+static void ata_host_set_pio(struct ata_port *ap)
+{
+ struct ata_device *master, *slave;
+ unsigned int pio, i;
+ u16 mask;
+
+ master = &ap->device[0];
+ slave = &ap->device[1];
+
+ assert (ata_dev_present(master) || ata_dev_present(slave));
+
+ /* low two bits of the IDENTIFY PIO-modes word advertise
+ * PIO3/PIO4 support — assumed; verify against ATA spec */
+ mask = ap->pio_mask;
+ if (ata_dev_present(master))
+ mask &= (master->id[ATA_ID_PIO_MODES] & 0x03);
+ if (ata_dev_present(slave))
+ mask &= (slave->id[ATA_ID_PIO_MODES] & 0x03);
+
+ /* require pio mode 3 or 4 support for host and all devices */
+ if (mask == 0) {
+ printk(KERN_WARNING "ata%u: no PIO3/4 support, ignoring\n",
+ ap->id);
+ goto err_out;
+ }
+
+ pio = (mask & ATA_ID_PIO4) ? 4 : 3;
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ if (ata_dev_present(&ap->device[i])) {
+ ap->device[i].pio_mode = (pio == 3) ?
+ XFER_PIO_3 : XFER_PIO_4;
+ ap->ops->set_piomode(ap, &ap->device[i], pio);
+ }
+
+ return;
+
+err_out:
+ ap->ops->port_disable(ap);
+}
+
+/**
+ * ata_host_set_udma - program UDMA timings for all devices on @ap
+ * @ap: port whose devices are to be configured
+ *
+ * Intersects the host's UDMA capability mask with each present
+ * device's advertised modes, picks the highest common mode, and
+ * programs it via the driver's set_udmamode hook.  Disables the
+ * port if host and devices share no UDMA mode.
+ *
+ * LOCKING:
+ * None documented — presumably probe-time kernel thread context;
+ * TODO confirm.
+ */
+
+static void ata_host_set_udma(struct ata_port *ap)
+{
+ struct ata_device *master, *slave;
+ u16 mask;
+ unsigned int i, j;
+ int udma_mode = -1;
+
+ master = &ap->device[0];
+ slave = &ap->device[1];
+
+ assert (ata_dev_present(master) || ata_dev_present(slave));
+ assert ((ap->flags & ATA_FLAG_PORT_DISABLED) == 0);
+
+ DPRINTK("udma masks: host 0x%X, master 0x%X, slave 0x%X\n",
+ ap->udma_mask,
+ (!ata_dev_present(master)) ? 0xff :
+ (master->id[ATA_ID_UDMA_MODES] & 0xff),
+ (!ata_dev_present(slave)) ? 0xff :
+ (slave->id[ATA_ID_UDMA_MODES] & 0xff));
+
+ mask = ap->udma_mask;
+ if (ata_dev_present(master))
+ mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
+ if (ata_dev_present(slave))
+ mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
+
+ /* scan from the fastest mode downward; bit j of mask is UDMA j */
+ i = XFER_UDMA_7;
+ while (i >= XFER_UDMA_0) {
+ j = i - XFER_UDMA_0;
+ DPRINTK("mask 0x%X i 0x%X j %u\n", mask, i, j);
+ if (mask & (1 << j)) {
+ udma_mode = i;
+ break;
+ }
+
+ i--;
+ }
+
+ /* require udma for host and all attached devices */
+ if (udma_mode < 0) {
+ printk(KERN_WARNING "ata%u: no UltraDMA support, ignoring\n",
+ ap->id);
+ goto err_out;
+ }
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ if (ata_dev_present(&ap->device[i])) {
+ ap->device[i].udma_mode = udma_mode;
+ ap->ops->set_udmamode(ap, &ap->device[i], udma_mode);
+ }
+
+ return;
+
+err_out:
+ ap->ops->port_disable(ap);
+}
+
+/**
+ * ata_dev_set_xfermode - issue SET FEATURES - XFER MODE to a device
+ * @ap: port containing the device
+ * @dev: device whose transfer mode is to be set
+ *
+ * Sends SET FEATURES with the device's PIO mode (if ATA_DFLAG_PIO)
+ * or UDMA mode as the sector-count argument, polls for completion,
+ * then re-enables interrupts and waits for the port to go idle.
+ *
+ * LOCKING:
+ * Sleeps (msleep / ata_busy_sleep); kernel thread context —
+ * TODO confirm against callers.
+ */
+
+static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+
+ /* set up set-features taskfile */
+ DPRINTK("set features - xfer mode\n");
+ ata_tf_init(ap, &tf, dev->devno);
+ tf.ctl |= ATA_NIEN;
+ tf.command = ATA_CMD_SET_FEATURES;
+ tf.feature = SETFEATURES_XFER;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+ if (dev->flags & ATA_DFLAG_PIO)
+ tf.nsect = dev->pio_mode;
+ else
+ tf.nsect = dev->udma_mode;
+
+ /* issue the SET FEATURES command */
+ ata_tf_to_host(ap, &tf);
+
+ /* crazy ATAPI devices... */
+ if (dev->class == ATA_DEV_ATAPI)
+ msleep(150);
+
+ ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ ata_irq_on(ap); /* re-enable interrupts */
+
+ ata_wait_idle(ap);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_dev_set_udma - program device for UDMA, report the result
+ * @ap: port containing the device
+ * @device: device number (0 or 1)
+ *
+ * No-op if the device is absent or the port is disabled.  Issues
+ * SET FEATURES - XFER MODE and logs the configured UDMA mode.
+ *
+ * LOCKING:
+ * Sleeps via ata_dev_set_xfermode; kernel thread context —
+ * TODO confirm against callers.
+ */
+
+static void ata_dev_set_udma(struct ata_port *ap, unsigned int device)
+{
+ struct ata_device *dev = &ap->device[device];
+
+ if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+ return;
+
+ ata_dev_set_xfermode(ap, dev);
+
+ assert((dev->udma_mode >= XFER_UDMA_0) &&
+ (dev->udma_mode <= XFER_UDMA_7));
+ printk(KERN_INFO "ata%u: dev %u configured for %s\n",
+ ap->id, device,
+ udma_str[dev->udma_mode - XFER_UDMA_0]);
+}
+
+/**
+ * ata_dev_set_pio - force device to PIO, program it, report the result
+ * @ap: port containing the device
+ * @device: device number (0 or 1)
+ *
+ * No-op if the device is absent or the port is disabled.  Marks the
+ * device PIO-only (ATA_DFLAG_PIO), issues SET FEATURES - XFER MODE,
+ * and logs whether PIO3 or PIO4 was configured.
+ *
+ * LOCKING:
+ * Sleeps via ata_dev_set_xfermode; kernel thread context —
+ * TODO confirm against callers.
+ */
+
+static void ata_dev_set_pio(struct ata_port *ap, unsigned int device)
+{
+ struct ata_device *dev = &ap->device[device];
+
+ if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+ return;
+
+ /* force PIO mode */
+ dev->flags |= ATA_DFLAG_PIO;
+
+ ata_dev_set_xfermode(ap, dev);
+
+ assert((dev->pio_mode >= XFER_PIO_3) &&
+ (dev->pio_mode <= XFER_PIO_4));
+ /* pio_mode holds an XFER_PIO_* value (see assert above), not a
+ * raw mode number, so compare against XFER_PIO_3 — comparing
+ * against literal 3 would always report PIO4 */
+ printk(KERN_INFO "ata%u: dev %u configured for PIO%c\n",
+ ap->id, device,
+ dev->pio_mode == XFER_PIO_3 ? '3' : '4');
+}
+
+/**
+ * ata_sg_clean - unmap the DMA buffers of a completed command
+ * @qc: command whose scatter/gather resources are to be released
+ *
+ * Undoes the pci_map_sg / pci_map_single performed at setup time,
+ * then clears ATA_QCFLAG_SG and the qc's sg pointer.
+ *
+ * LOCKING:
+ * None documented — presumably caller holds whatever protects the
+ * qc; TODO confirm.
+ */
+
+static void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ Scsi_Cmnd *cmd = qc->scsicmd;
+ struct scatterlist *sg = qc->sg;
+ int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE);
+ assert(qc->flags & ATA_QCFLAG_SG);
+ assert(sg != NULL);
+
+ if (!cmd->use_sg)
+ assert(qc->n_elem == 1);
+
+ DPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+ /* single-buffer commands were mapped with pci_map_single */
+ if (cmd->use_sg)
+ pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
+ else
+ pci_unmap_single(ap->host_set->pdev, sg[0].dma_address,
+ sg[0].length, dir);
+
+ qc->flags &= ~ATA_QCFLAG_SG;
+ qc->sg = NULL;
+}
+
+/**
+ * ata_fill_sg - build the port's PRD table from a command's sg list
+ * @qc: command whose (already DMA-mapped) sg entries are copied
+ *
+ * Writes one little-endian PRD entry per sg element into ap->prd
+ * and flags the last entry end-of-table (ATA_PRD_EOT).
+ *
+ * LOCKING:
+ * None documented — presumably host_set lock held, as for the
+ * bmdma start paths; TODO confirm.
+ */
+void ata_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg = qc->sg;
+ struct ata_port *ap = qc->ap;
+ unsigned int i;
+
+ assert(sg != NULL);
+ assert(qc->n_elem > 0);
+
+ for (i = 0; i < qc->n_elem; i++) {
+ ap->prd[i].addr = cpu_to_le32(sg[i].dma_address);
+ ap->prd[i].flags_len = cpu_to_le32(sg[i].length);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n",
+ i, le32_to_cpu(ap->prd[i].addr), le32_to_cpu(ap->prd[i].flags_len));
+ }
+ /* mark final entry so the DMA engine stops here */
+ ap->prd[qc->n_elem - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_sg_setup_one - build a one-element sg list for a flat buffer
+ * @qc: command whose SCSI request uses a single buffer (use_sg == 0)
+ *
+ * Fills qc->sgent from cmd->request_buffer and, when the command is
+ * a DMA command (ATA_QCFLAG_SG set), maps the buffer for DMA.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero (cannot currently fail).
+ */
+
+static int ata_sg_setup_one(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ Scsi_Cmnd *cmd = qc->scsicmd;
+ int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+ struct scatterlist *sg = qc->sg;
+ unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
+
+ assert(sg == &qc->sgent);
+ assert(qc->n_elem == 1);
+
+ /* describe the flat buffer as a single scatterlist entry */
+ sg->page = virt_to_page(cmd->request_buffer);
+ sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK;
+ sg->length = cmd->request_bufflen;
+
+ /* PIO commands need no DMA mapping */
+ if (!have_sg)
+ return 0;
+
+ sg->dma_address = pci_map_single(ap->host_set->pdev,
+ cmd->request_buffer,
+ cmd->request_bufflen, dir);
+
+ DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen,
+ qc->flags & ATA_QCFLAG_WRITE ? "write" : "read");
+
+ return 0;
+}
+
+/**
+ * ata_sg_setup - map a SCSI command's scatter/gather list for DMA
+ * @qc: command whose SCSI request carries an sg list (use_sg > 0)
+ *
+ * For DMA commands (ATA_QCFLAG_SG) the list is mapped with
+ * pci_map_sg; for PIO commands the element count is taken as-is.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero on success, -1 if pci_map_sg mapped no elements.
+ */
+
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ Scsi_Cmnd *cmd = qc->scsicmd;
+ struct scatterlist *sg;
+ int n_elem;
+ unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
+
+ VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg);
+ assert(cmd->use_sg > 0);
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ if (have_sg) {
+ int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+ n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir);
+ if (n_elem < 1)
+ return -1;
+ DPRINTK("%d sg elements mapped\n", n_elem);
+ } else {
+ n_elem = cmd->use_sg;
+ }
+ qc->n_elem = n_elem;
+
+ return 0;
+}
+
+/**
+ * ata_pio_poll - poll BSY during a PIO transaction, from the thread
+ * @ap: port being polled; thr_state selects which PIO phase we are in
+ *
+ * If the device is still busy and the deadline (ap->thr_timeout) has
+ * passed, moves to THR_PIO_TMOUT.  If busy but within the deadline,
+ * stays in the polling state and asks the thread to sleep briefly.
+ * Otherwise returns to the corresponding non-polling PIO state.
+ *
+ * LOCKING:
+ * None documented — runs in the per-port kernel thread; TODO
+ * confirm no lock is needed for thr_state here.
+ *
+ * RETURNS:
+ * Number of jiffies the caller thread should sleep, 0 for none.
+ */
+
+static unsigned long ata_pio_poll(struct ata_port *ap)
+{
+ u8 status;
+ unsigned int poll_state = THR_UNKNOWN;
+ unsigned int reg_state = THR_UNKNOWN;
+ const unsigned int tmout_state = THR_PIO_TMOUT;
+
+ /* map current state to its (polling, ready) successor pair */
+ switch (ap->thr_state) {
+ case THR_PIO:
+ case THR_PIO_POLL:
+ poll_state = THR_PIO_POLL;
+ reg_state = THR_PIO;
+ break;
+ case THR_PIO_LAST:
+ case THR_PIO_LAST_POLL:
+ poll_state = THR_PIO_LAST_POLL;
+ reg_state = THR_PIO_LAST;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ status = ata_chk_status(ap);
+ if (status & ATA_BUSY) {
+ if (time_after(jiffies, ap->thr_timeout)) {
+ ap->thr_state = tmout_state;
+ return 0;
+ }
+ ap->thr_state = poll_state;
+ return ATA_SHORT_PAUSE;
+ }
+
+ ap->thr_state = reg_state;
+ return 0;
+}
+
+/**
+ * ata_pio_start - kick off a polled PIO data transfer
+ * @qc: command with a PIO read/write protocol
+ *
+ * Marks the command polled, disables device interrupts (nIEN),
+ * loads the taskfile, and wakes the per-port thread in THR_PIO
+ * state to shovel the data.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_pio_start (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ assert((qc->tf.protocol == ATA_PROT_PIO_READ) ||
+ (qc->tf.protocol == ATA_PROT_PIO_WRITE));
+
+ qc->flags |= ATA_QCFLAG_POLL;
+ qc->tf.ctl |= ATA_NIEN; /* disable interrupts */
+ ata_tf_to_host_nolock(ap, &qc->tf);
+ ata_thread_wake(ap, THR_PIO);
+}
+
+/**
+ * ata_pio_complete - finish a PIO transfer after the last sector
+ * @ap: port whose active command is completing
+ *
+ * Waits (briefly, then with fallback to polling state) for BSY and
+ * DRQ to clear, then marks the thread idle, re-enables interrupts
+ * and completes the queued command.
+ *
+ * LOCKING:
+ * Runs in the per-port kernel thread; takes the host_set lock only
+ * around the thr_state update.
+ */
+
+static void ata_pio_complete (struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ u8 drv_stat;
+
+ /*
+ * This is purely heuristic. This is a fast path.
+ * Sometimes when we enter, BSY will be cleared in
+ * a chk-status or two. If not, the drive is probably seeking
+ * or something. Snooze for a couple msecs, then
+ * chk-status again. If still busy, fall back to
+ * THR_PIO_POLL state.
+ */
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+ if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+ msleep(2);
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+ if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+ ap->thr_state = THR_PIO_LAST_POLL;
+ ap->thr_timeout = jiffies + ATA_TMOUT_PIO;
+ return;
+ }
+ }
+
+ drv_stat = ata_wait_idle(ap);
+ if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+ ap->thr_state = THR_PIO_ERR;
+ return;
+ }
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ assert(qc != NULL);
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->thr_state = THR_IDLE;
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ ata_irq_on(ap);
+
+ ata_qc_complete(qc, drv_stat, 0);
+}
+
+/**
+ * ata_pio_sector - transfer one sector of a polled PIO command
+ * @ap: port whose active command is transferring data
+ *
+ * Waits (briefly, falling back to THR_PIO_POLL) for BSY to clear,
+ * requires DRQ, then moves one sector between the data register and
+ * the current scatter/gather element, advancing the sg cursor.
+ *
+ * LOCKING:
+ * Runs in the per-port kernel thread (may sleep in msleep/kmap).
+ */
+
+static void ata_pio_sector(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ struct scatterlist *sg;
+ Scsi_Cmnd *cmd;
+ struct page *page;
+ unsigned char *buf;
+ u8 status;
+
+ /*
+ * This is purely heuristic. This is a fast path.
+ * Sometimes when we enter, BSY will be cleared in
+ * a chk-status or two. If not, the drive is probably seeking
+ * or something. Snooze for a couple msecs, then
+ * chk-status again. If still busy, fall back to
+ * THR_PIO_POLL state.
+ */
+ status = ata_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
+ msleep(2);
+ status = ata_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ap->thr_state = THR_PIO_POLL;
+ ap->thr_timeout = jiffies + ATA_TMOUT_PIO;
+ return;
+ }
+ }
+
+ /* handle BSY=0, DRQ=0 as error */
+ if ((status & ATA_DRQ) == 0) {
+ ap->thr_state = THR_PIO_ERR;
+ return;
+ }
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ assert(qc != NULL);
+
+ cmd = qc->scsicmd;
+ sg = qc->sg;
+
+ if (qc->cursect == (qc->nsect - 1))
+ ap->thr_state = THR_PIO_LAST;
+
+ /* remember the page we kmap so we kunmap the SAME page below:
+ * the old code kunmapped sg[qc->cursg].page after cursg may
+ * have been advanced, unmapping the wrong page */
+ page = sg[qc->cursg].page;
+ buf = kmap(page) +
+ sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE);
+
+ qc->cursect++;
+ qc->cursg_ofs++;
+
+ /* advance to the next sg element once this one is exhausted */
+ if (cmd->use_sg)
+ if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
+ qc->cursg++;
+ qc->cursg_ofs = 0;
+ }
+
+ DPRINTK("data %s, drv_stat 0x%X\n",
+ qc->flags & ATA_QCFLAG_WRITE ? "write" : "read",
+ status);
+
+ /* do the actual data transfer */
+ /* FIXME: mmio-ize */
+ if (qc->flags & ATA_QCFLAG_WRITE)
+ outsl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS);
+ else
+ insl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS);
+
+ kunmap(page);
+}
+
+/**
+ * ata_eng_schedule - run an iteration of the pio/dma/whatever engine
+ * @ap: port on which activity will occur
+ * @eng: instance of engine
+ *
+ * Currently an unimplemented stub (see FIXME); queued commands are
+ * pushed onto eng->q by ata_qc_push but nothing drains them here yet.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+static void ata_eng_schedule (struct ata_port *ap, struct ata_engine *eng)
+{
+ /* FIXME */
+}
+
+/**
+ * ata_eng_timeout - Handle timeout of queued command
+ * @ap: Port on which timed-out command is active
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+
+void ata_eng_timeout(struct ata_port *ap)
+{
+ u8 host_stat, drv_stat;
+ struct ata_queued_cmd *qc;
+
+ DPRINTK("ENTER\n");
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+ ap->id);
+ goto out;
+ }
+
+ /* completion path depends on the protocol of the stuck command */
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA_READ:
+ case ATA_PROT_DMA_WRITE:
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void *mmio = (void *) ap->ioaddr.bmdma_addr;
+ host_stat = readb(mmio + ATA_DMA_STATUS);
+ } else
+ host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+ printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n",
+ ap->id, host_stat);
+
+ /* stops the DMA engine and completes the qc with error */
+ ata_dma_complete(ap, host_stat, 1);
+ break;
+
+ case ATA_PROT_NODATA:
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat);
+
+ ata_qc_complete(qc, drv_stat, 1);
+ break;
+
+ default:
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat);
+
+ ata_qc_complete(qc, drv_stat, 1);
+ break;
+ }
+
+out:
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_qc_new - allocate a free queued-command slot on @ap
+ * @ap: port whose qactive bitmap is searched
+ *
+ * Atomically claims the first free tag and returns the matching
+ * queued command, with qc->tag set.
+ *
+ * LOCKING:
+ * None needed for the tag claim itself (test_and_set_bit is
+ * atomic) — TODO confirm callers need no further serialization.
+ *
+ * RETURNS:
+ * Pointer to the claimed command, or NULL if all tags are in use.
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ATA_MAX_QUEUE; i++)
+ if (!test_and_set_bit(i, &ap->qactive)) {
+ qc = ata_qc_from_tag(ap, i);
+ break;
+ }
+
+ if (qc)
+ qc->tag = i;
+
+ return qc;
+}
+
+/**
+ * ata_qc_new_init - allocate and initialize a queued command
+ * @ap: port the command will run on
+ * @dev: target device
+ *
+ * Claims a tag via ata_qc_new, then initializes the command's
+ * fields, its taskfile, and DMA/LBA48 flags from @dev's flags.
+ *
+ * LOCKING:
+ * Same as ata_qc_new — TODO confirm.
+ *
+ * RETURNS:
+ * Initialized command, or NULL if no tag was free.
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+ struct ata_device *dev)
+{
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_new(ap);
+ if (qc) {
+ qc->sg = NULL;
+ qc->flags = 0;
+ qc->scsicmd = NULL;
+ qc->ap = ap;
+ qc->dev = dev;
+ INIT_LIST_HEAD(&qc->node);
+ /* sem starts locked; ata_qc_complete ups it on completion */
+ init_MUTEX_LOCKED(&qc->sem);
+
+ ata_tf_init(ap, &qc->tf, dev->devno);
+
+ if (likely((dev->flags & ATA_DFLAG_PIO) == 0))
+ qc->flags |= ATA_QCFLAG_DMA;
+ if (dev->flags & ATA_DFLAG_LBA48)
+ qc->tf.flags |= ATA_TFLAG_LBA48;
+ }
+
+ return qc;
+}
+
+/**
+ * ata_qc_complete - finish an ATA queued command
+ * @qc: command to complete (must be non-NULL and active)
+ * @drv_stat: ATA status register contents at completion
+ * @done_late: set by timeout paths; currently unused here
+ *
+ * Unmaps DMA buffers, translates @drv_stat into a SCSI result for
+ * the originating command (if any), releases the tag and wakes any
+ * waiter sleeping on qc->sem.
+ *
+ * LOCKING:
+ * Presumably host_set lock held by most callers for the tag
+ * bookkeeping — TODO confirm.
+ */
+
+void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late)
+{
+ struct ata_port *ap;
+ Scsi_Cmnd *cmd;
+ unsigned int tag, do_clear = 0;
+
+ /* check qc BEFORE touching it: ata_qc_from_tag _might_ return
+ * NULL, and the old code dereferenced qc in its declaration
+ * initializers before this assert could fire */
+ assert(qc != NULL);
+ assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+ ap = qc->ap;
+ cmd = qc->scsicmd;
+
+ if (likely(qc->flags & ATA_QCFLAG_SG))
+ ata_sg_clean(qc);
+
+ if (cmd) {
+ if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) {
+ if (qc->flags & ATA_QCFLAG_ATAPI)
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+ else
+ ata_to_sense_error(qc);
+ } else {
+ cmd->result = SAM_STAT_GOOD;
+ }
+
+ qc->scsidone(cmd);
+ }
+
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+ tag = qc->tag;
+ if (likely(ata_tag_valid(tag))) {
+ if (tag == ap->active_tag)
+ ap->active_tag = ATA_TAG_POISON;
+ qc->tag = ATA_TAG_POISON;
+ do_clear = 1;
+ }
+
+ /* wake anyone sleeping for this command's completion */
+ up(&qc->sem);
+
+ if (likely(do_clear))
+ clear_bit(tag, &ap->qactive);
+}
+
+/**
+ * ata_qc_push - queue a command on the port's engine
+ * @qc: command to queue
+ * @append: non-zero to append (normal case), zero to push to front
+ *
+ * Adds the command to the engine queue and, if the engine was not
+ * already active, kicks ata_eng_schedule (currently a stub).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_engine *eng = &ap->eng;
+
+ if (likely(append))
+ list_add_tail(&qc->node, &eng->q);
+ else
+ list_add(&qc->node, &eng->q);
+
+ /* only the transition idle -> active triggers a schedule */
+ if (!test_and_set_bit(ATA_EFLG_ACTIVE, &eng->flags))
+ ata_eng_schedule(ap, eng);
+}
+
+/**
+ * ata_qc_issue - hand a prepared command to the hardware
+ * @qc: command to issue
+ *
+ * Selects the device, builds and fills the scatter/gather and PRD
+ * tables, marks the command active, then either starts bmdma (DMA
+ * commands) or kicks off polled PIO.
+ *
+ * LOCKING:
+ * None documented — presumably host_set lock held (callers of the
+ * bmdma/pio start paths require it); TODO confirm.
+ *
+ * RETURNS:
+ * Zero on success, -1 if sg setup failed.
+ */
+int ata_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ Scsi_Cmnd *cmd = qc->scsicmd;
+ unsigned int dma = qc->flags & ATA_QCFLAG_DMA;
+
+ ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+ /* set up SG table */
+ if (cmd->use_sg) {
+ if (ata_sg_setup(qc))
+ goto err_out;
+ } else {
+ if (ata_sg_setup_one(qc))
+ goto err_out;
+ }
+
+ ap->ops->fill_sg(qc);
+
+ qc->ap->active_tag = qc->tag;
+ qc->flags |= ATA_QCFLAG_ACTIVE;
+
+ if (likely(dma)) {
+ ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ } else
+ /* load tf registers, initiate polling pio */
+ ata_pio_start(qc);
+
+ return 0;
+
+err_out:
+ return -1;
+}
+
+/**
+ * ata_bmdma_start_mmio - start a bus-master DMA transaction (MMIO)
+ * @qc: command whose PRD table is already filled
+ *
+ * Programs the BMDMA registers via MMIO: PRD table address, data
+ * direction, clears stale interrupt/error bits, issues the ATA
+ * command, then sets the start bit.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
+ u8 host_stat, dmactl;
+ void *mmio = (void *) ap->ioaddr.bmdma_addr;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction */
+ /* FIXME: redundant to later start-dma command? */
+ writeb(rw ? 0 : ATA_DMA_WR, mmio + ATA_DMA_CMD);
+
+ /* clear interrupt, error bits */
+ host_stat = readb(mmio + ATA_DMA_STATUS);
+ writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS);
+
+ /* issue r/w command */
+ ap->ops->exec_command(ap, &qc->tf);
+
+ /* start host DMA transaction */
+ dmactl = readb(mmio + ATA_DMA_CMD);
+ writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+
+ /* Strictly, one may wish to issue a readb() here, to
+ * flush the mmio write. However, control also passes
+ * to the hardware at this point, and it will interrupt
+ * us when we are to resume control. So, in effect,
+ * we don't care when the mmio write flushes.
+ * Further, a read of the DMA status register _immediately_
+ * following the write may not be what certain flaky hardware
+ * is expected, so I think it is best to not add a readb()
+ * without first all the MMIO ATA cards/mobos.
+ * Or maybe I'm just being paranoid.
+ */
+}
+
+/**
+ * ata_bmdma_start_pio - start a bus-master DMA transaction (port I/O)
+ * @qc: command whose PRD table is already filled
+ *
+ * Port-I/O twin of ata_bmdma_start_mmio: programs PRD address and
+ * direction, clears stale interrupt/error bits, issues the ATA
+ * command, then sets the start bit.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
+ u8 host_stat, dmactl;
+
+ /* load PRD table addr. */
+ outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction */
+ /* FIXME: redundant to later start-dma command? */
+ outb(rw ? 0 : ATA_DMA_WR, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* clear interrupt, error bits */
+ host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+ /* issue r/w command */
+ ap->ops->exec_command(ap, &qc->tf);
+
+ /* start host DMA transaction */
+ dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ outb(dmactl | ATA_DMA_START,
+ ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+/**
+ * ata_dma_complete - stop the DMA engine and complete the command
+ * @ap: port whose active DMA command finished (or timed out)
+ * @host_stat: BMDMA status register contents
+ * @done_late: forwarded to ata_qc_complete (timeout indicator)
+ *
+ * Clears the start/stop bit, acks interrupt/error bits, performs
+ * the spec-mandated one-cycle wait, then completes the active
+ * queued command with the drive's final status.
+ *
+ * LOCKING:
+ * None documented — callers are the IRQ handler (host_set lock
+ * held) and the timeout path; TODO confirm.
+ */
+
+static void ata_dma_complete(struct ata_port *ap, u8 host_stat,
+ unsigned int done_late)
+{
+ VPRINTK("ENTER\n");
+
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void *mmio = (void *) ap->ioaddr.bmdma_addr;
+
+ /* clear start/stop bit */
+ writeb(0, mmio + ATA_DMA_CMD);
+
+ /* ack intr, err bits */
+ writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ mmio + ATA_DMA_STATUS);
+ } else {
+ /* clear start/stop bit */
+ outb(0, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* ack intr, err bits */
+ outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ }
+
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_altstatus(ap); /* dummy read */
+
+ DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n",
+ ap->id, (u32) host_stat, (u32) ata_chk_status(ap));
+
+ /* get drive status; clear intr; complete txn */
+ ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
+ ata_wait_idle(ap), done_late);
+}
+
+/**
+ * ata_host_intr - Handle host interrupt for given (port, task)
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle host interrupt for given queued command. Currently,
+ * only DMA interrupts are handled. All other commands are
+ * handled via polling with interrupts disabled (nIEN bit).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+
+static inline unsigned int ata_host_intr (struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ u8 status, host_stat;
+ unsigned int handled = 0;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA_READ:
+ case ATA_PROT_DMA_WRITE:
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void *mmio = (void *) ap->ioaddr.bmdma_addr;
+ host_stat = readb(mmio + ATA_DMA_STATUS);
+ } else
+ host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
+
+ /* not our interrupt (shared IRQ line) */
+ if (!(host_stat & ATA_DMA_INTR)) {
+ ap->stats.idle_irq++;
+ break;
+ }
+
+ ata_dma_complete(ap, host_stat, 0);
+ handled = 1;
+ break;
+
+ case ATA_PROT_NODATA: /* command completion, but no data xfer */
+ status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+ DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ ata_qc_complete(qc, status, 0);
+ handled = 1;
+ break;
+
+ default:
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ handled = 1;
+ ata_irq_ack(ap, 0); /* debug trap */
+ printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+ }
+#endif
+ break;
+ }
+
+ return handled;
+}
+
+/**
+ * ata_interrupt - top-level IRQ handler for a host set
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our struct ata_host_set
+ * @regs: saved registers (unused)
+ *
+ * Walks every enabled port in the host set and dispatches to
+ * ata_host_intr for each port whose active command is not in
+ * polled mode.
+ *
+ * LOCKING:
+ * Takes the host_set lock for the duration.
+ *
+ * RETURNS:
+ * IRQ_HANDLED if any port claimed the interrupt, else IRQ_NONE.
+ */
+
+irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ata_host_set *host_set = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+ spin_lock_irqsave(&host_set->lock, flags);
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ struct ata_port *ap;
+
+ ap = host_set->ports[i];
+ if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0))
+ handled += ata_host_intr(ap, qc);
+ }
+ }
+
+ spin_unlock_irqrestore(&host_set->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * ata_thread_wake - move the port thread to a new state and wake it
+ * @ap: port whose thread is woken
+ * @thr_state: new thread state (must currently be THR_IDLE)
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_thread_wake(struct ata_port *ap, unsigned int thr_state)
+{
+ assert(ap->thr_state == THR_IDLE);
+ ap->thr_state = thr_state;
+ up(&ap->thr_sem);
+}
+
+/**
+ * ata_thread_timer - timer callback that wakes the port thread
+ * @opaque: the struct ata_port, cast to unsigned long
+ *
+ * LOCKING:
+ * Timer (softirq) context; only ups the thread semaphore.
+ */
+
+static void ata_thread_timer(unsigned long opaque)
+{
+ struct ata_port *ap = (struct ata_port *) opaque;
+
+ up(&ap->thr_sem);
+}
+
+/**
+ * ata_thread_iter - run one step of the per-port thread state machine
+ * @ap: port whose thr_state is dispatched
+ *
+ * Dispatches on ap->thr_state: probing/reset, PIO data shoveling,
+ * ATAPI CDB transmission, polling, and idle states.
+ *
+ * LOCKING:
+ * Per-port kernel thread context; may sleep.
+ *
+ * RETURNS:
+ * Jiffies to sleep before the next iteration; 0 to iterate again
+ * immediately, negative to request thread exit (THR_AWAIT_DEATH).
+ */
+
+static unsigned long ata_thread_iter(struct ata_port *ap)
+{
+ long timeout = 0;
+
+ DPRINTK("ata%u: thr_state %s\n",
+ ap->id, ata_thr_state_name(ap->thr_state));
+
+ switch (ap->thr_state) {
+ case THR_UNKNOWN:
+ ap->thr_state = THR_PORT_RESET;
+ break;
+
+ case THR_PROBE_START:
+ down(&ap->sem);
+ ap->thr_state = THR_PORT_RESET;
+ break;
+
+ case THR_PORT_RESET:
+ ata_port_reset(ap);
+ break;
+
+ case THR_PROBE_SUCCESS:
+ up(&ap->probe_sem);
+ ap->thr_state = THR_IDLE;
+ break;
+
+ case THR_PROBE_FAILED:
+ up(&ap->probe_sem);
+ ap->thr_state = THR_AWAIT_DEATH;
+ break;
+
+ case THR_AWAIT_DEATH:
+ timeout = -1; /* signal ata_thread to exit */
+ break;
+
+ case THR_IDLE:
+ timeout = 30 * HZ;
+ break;
+
+ case THR_PIO:
+ ata_pio_sector(ap);
+ break;
+
+ case THR_PIO_LAST:
+ ata_pio_complete(ap);
+ break;
+
+ case THR_PIO_POLL:
+ case THR_PIO_LAST_POLL:
+ timeout = ata_pio_poll(ap);
+ break;
+
+ case THR_PIO_TMOUT:
+ printk(KERN_ERR "ata%d: FIXME: THR_PIO_TMOUT\n", /* FIXME */
+ ap->id);
+ timeout = 11 * HZ;
+ break;
+
+ case THR_PIO_ERR:
+ printk(KERN_ERR "ata%d: FIXME: THR_PIO_ERR\n", /* FIXME */
+ ap->id);
+ timeout = 11 * HZ;
+ break;
+
+ case THR_PACKET:
+ atapi_cdb_send(ap);
+ break;
+
+ default:
+ printk(KERN_DEBUG "ata%u: unknown thr state %s\n",
+ ap->id, ata_thr_state_name(ap->thr_state));
+ break;
+ }
+
+ DPRINTK("ata%u: new thr_state %s, returning %ld\n",
+ ap->id, ata_thr_state_name(ap->thr_state), timeout);
+ return timeout;
+}
+
+/**
+ * ata_thread - per-port kernel thread main loop
+ * @data: the struct ata_port this thread serves
+ *
+ * Daemonizes, then repeatedly runs ata_thread_iter and sleeps for
+ * the interval it returns (woken early via thr_sem / thr_timer /
+ * SIGTERM).  Exits when an iteration returns negative or
+ * ap->time_to_die is set.
+ *
+ * LOCKING:
+ * Kernel thread context; may sleep.
+ *
+ * RETURNS:
+ * Does not return normally; exits via complete_and_exit.
+ */
+
+static int ata_thread (void *data)
+{
+ struct ata_port *ap = data;
+ long timeout;
+
+ daemonize ("katad-%u", ap->id);
+ allow_signal(SIGTERM);
+
+ while (1) {
+ cond_resched();
+
+ timeout = ata_thread_iter(ap);
+
+ if (signal_pending (current))
+ flush_signals(current);
+
+ if ((timeout < 0) || (ap->time_to_die))
+ break;
+
+ /* note sleeping for full timeout not guaranteed (that's ok) */
+ if (timeout) {
+ mod_timer(&ap->thr_timer, jiffies + timeout);
+ down_interruptible(&ap->thr_sem);
+
+ if (signal_pending (current))
+ flush_signals(current);
+
+ if (ap->time_to_die)
+ break;
+ }
+ }
+
+ printk(KERN_DEBUG "ata%u: thread exiting\n", ap->id);
+ ap->thr_pid = -1;
+ complete_and_exit (&ap->thr_exited, 0);
+}
+
+/**
+ * ata_thread_kill - kill per-port kernel thread
+ * @ap: port whose thread is to be killed
+ *
+ * Sets time_to_die, signals the thread with SIGTERM, and waits for
+ * it to exit.  No-op if no thread is running.
+ *
+ * LOCKING:
+ * May sleep (wait_for_completion); process context.
+ *
+ * RETURNS:
+ * Zero on success, kill_proc's error code otherwise.
+ */
+
+static int ata_thread_kill(struct ata_port *ap)
+{
+ int ret = 0;
+
+ if (ap->thr_pid >= 0) {
+ ap->time_to_die = 1;
+ wmb(); /* make the flag visible before signaling the thread */
+ ret = kill_proc(ap->thr_pid, SIGTERM, 1);
+ if (ret)
+ printk(KERN_ERR "ata%d: unable to kill kernel thread\n",
+ ap->id);
+ else
+ wait_for_completion(&ap->thr_exited);
+ }
+
+ return ret;
+}
+
+/**
+ * atapi_cdb_send - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ *
+ * When device has indicated its readiness to accept
+ * a CDB, this function is called. Send the CDB.
+ * If DMA is to be performed, exit immediately.
+ * Otherwise, we are in polling mode, so poll
+ * status until the operation succeeds or fails.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+
+static void atapi_cdb_send(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ u8 status;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ assert(qc != NULL);
+ assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+ /* sleep-wait for BSY to clear */
+ DPRINTK("busy wait\n");
+ if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
+ goto err_out;
+
+ /* make sure DRQ is set */
+ status = ata_chk_status(ap);
+ if ((status & ATA_DRQ) == 0)
+ goto err_out;
+
+ /* send SCSI cdb */
+ /* FIXME: mmio-ize */
+ DPRINTK("send cdb\n");
+ outsl(ap->ioaddr.data_addr,
+ qc->scsicmd->cmnd, ap->host->max_cmd_len / 4);
+
+ /* if we are DMA'ing, irq handler takes over from here */
+ if (qc->tf.feature == ATAPI_PKT_DMA)
+ goto out;
+
+ /* sleep-wait for BSY to clear */
+ DPRINTK("busy wait 2\n");
+ if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
+ goto err_out;
+
+ /* wait for BSY,DRQ to clear */
+ status = ata_wait_idle(ap);
+ if (status & (ATA_BUSY | ATA_DRQ))
+ goto err_out;
+
+ /* transaction completed, indicate such to scsi stack */
+ ata_qc_complete(qc, status, 0);
+ ata_irq_on(ap);
+
+out:
+ ap->thr_state = THR_IDLE;
+ return;
+
+err_out:
+ ata_qc_complete(qc, ATA_ERR, 0);
+ goto out;
+}
+
+/**
+ * ata_host_remove - tear down a single libata port
+ * @ap: port to be torn down
+ * @do_unregister: non-zero to also unregister the Scsi_Host
+ *
+ * Kills the port's kernel thread and frees its PRD table.
+ *
+ * LOCKING:
+ * None (caller context; may sleep via ata_thread_kill).
+ */
+
+static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
+{
+ struct Scsi_Host *sh = ap->host;
+
+ DPRINTK("ENTER\n");
+
+ if (do_unregister)
+ scsi_remove_host(sh); /* FIXME: check return val */
+
+ ata_thread_kill(ap); /* FIXME: check return val */
+
+ pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+}
+
+/**
+ * ata_host_init - initialize an ata_port and its Scsi_Host
+ * @ap: port structure to initialize (embedded in @host's hostdata)
+ * @host: SCSI host associated with the port
+ * @host_set: container this port belongs to
+ * @ent: probe entry describing hardware parameters
+ * @port_no: index of this port within @host_set
+ *
+ * LOCKING:
+ * None (probe context, single-threaded).
+ */
+
+static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
+ struct ata_host_set *host_set,
+ struct ata_probe_ent *ent, unsigned int port_no)
+{
+ unsigned int i;
+
+ host->max_id = 16;
+ host->max_lun = 1;
+ host->max_channel = 1;
+ host->unique_id = ata_unique_id++;
+ host->max_cmd_len = 12;
+ scsi_set_device(host, &ent->pdev->dev);
+
+ /* port starts disabled; probe thread enables it later */
+ ap->flags = ATA_FLAG_PORT_DISABLED;
+ ap->id = host->unique_id;
+ ap->host = host;
+ ap->ctl = ATA_DEVCTL_OBS;
+ ap->host_set = host_set;
+ ap->port_no = port_no;
+ ap->pio_mask = ent->pio_mask;
+ ap->udma_mask = ent->udma_mask;
+ ap->flags |= ent->host_flags;
+ ap->ops = ent->port_ops;
+ ap->thr_state = THR_PROBE_START;
+ ap->cbl = ATA_CBL_NONE;
+ ap->device[0].flags = ATA_DFLAG_MASTER;
+ ap->active_tag = ATA_TAG_POISON;
+
+ /* ata_engine init */
+ ap->eng.flags = 0;
+ INIT_LIST_HEAD(&ap->eng.q);
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ap->device[i].devno = i;
+
+ /* semaphores start locked: the port thread blocks until the
+ * probe sequence releases them (see ata_device_add) */
+ init_completion(&ap->thr_exited);
+ init_MUTEX_LOCKED(&ap->probe_sem);
+ init_MUTEX_LOCKED(&ap->sem);
+ init_MUTEX_LOCKED(&ap->thr_sem);
+
+ init_timer(&ap->thr_timer);
+ ap->thr_timer.function = ata_thread_timer;
+ ap->thr_timer.data = (unsigned long) ap;
+
+#ifdef ATA_IRQ_TRAP
+ ap->stats.unhandled_irq = 1;
+ ap->stats.idle_irq = 1;
+#endif
+
+ memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+}
+
+/**
+ * ata_host_add - allocate and start one libata port
+ * @ent: probe entry describing the hardware
+ * @host_set: container the new port will belong to
+ * @port_no: index of the port within @host_set
+ *
+ * Allocates a Scsi_Host with an embedded ata_port, initializes
+ * it, allocates its PRD table, and starts its kernel thread.
+ *
+ * LOCKING:
+ * None (probe context, may sleep).
+ *
+ * RETURNS:
+ * New ata_port on success, NULL on failure (all partially
+ * acquired resources are released).
+ */
+
+static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
+ struct ata_host_set *host_set,
+ unsigned int port_no)
+{
+ struct pci_dev *pdev = ent->pdev;
+ struct Scsi_Host *host;
+ struct ata_port *ap;
+
+ DPRINTK("ENTER\n");
+ host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
+ if (!host)
+ return NULL;
+
+ /* ata_port lives inside the Scsi_Host's hostdata area */
+ ap = (struct ata_port *) &host->hostdata[0];
+
+ ata_host_init(ap, host, host_set, ent, port_no);
+
+ ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma);
+ if (!ap->prd)
+ goto err_out;
+ DPRINTK("prd alloc, virt %p, dma %x\n", ap->prd, ap->prd_dma);
+
+ ap->thr_pid = kernel_thread(ata_thread, ap, CLONE_FS | CLONE_FILES);
+ if (ap->thr_pid < 0) {
+ printk(KERN_ERR "ata%d: unable to start kernel thread\n",
+ ap->id);
+ goto err_out_free;
+ }
+
+ return ap;
+
+err_out_free:
+ pci_free_consistent(ap->host_set->pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+
+err_out:
+ scsi_host_put(host);
+ return NULL;
+}
+
+/**
+ * ata_device_add - register the ports described by a probe entry
+ * @ent: probe entry describing the hardware
+ *
+ * Allocates the host_set container, adds each port, acquires the
+ * shared IRQ, runs each port's probe synchronously, and finally
+ * registers the resulting hosts with the SCSI layer.
+ *
+ * LOCKING:
+ * None (probe context, may sleep).
+ *
+ * RETURNS:
+ * Number of ports registered on success, 0 on failure.
+ */
+
+int ata_device_add(struct ata_probe_ent *ent)
+{
+ unsigned int count = 0, i;
+ struct pci_dev *pdev = ent->pdev;
+ struct ata_host_set *host_set;
+
+ DPRINTK("ENTER\n");
+ /* alloc a container for our list of ATA ports (buses) */
+ host_set = kmalloc(sizeof(struct ata_host_set) +
+ (ent->n_ports * sizeof(void *)), GFP_KERNEL);
+ if (!host_set)
+ return 0;
+ memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
+ spin_lock_init(&host_set->lock);
+
+ host_set->pdev = pdev;
+ host_set->n_ports = ent->n_ports;
+ host_set->irq = ent->irq;
+ host_set->mmio_base = ent->mmio_base;
+
+ /* register each port bound to this device */
+ for (i = 0; i < ent->n_ports; i++) {
+ struct ata_port *ap;
+
+ ap = ata_host_add(ent, host_set, i);
+ if (!ap)
+ goto err_out;
+
+ host_set->ports[i] = ap;
+
+ /* print per-port info to dmesg */
+ printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
+ "bmdma 0x%lX irq %lu\n",
+ ap->id,
+ ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
+ ata_udma_string(ent->udma_mask),
+ ap->ioaddr.cmd_addr,
+ ap->ioaddr.ctl_addr,
+ ap->ioaddr.bmdma_addr,
+ ent->irq);
+
+ count++;
+ }
+
+ if (!count) {
+ kfree(host_set);
+ return 0;
+ }
+
+ /* obtain irq, that is shared between channels */
+ if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
+ DRV_NAME, host_set))
+ goto err_out;
+
+ /* perform each probe synchronously: release the port's sem to
+ * let its thread start probing, then block on probe_sem until
+ * the thread signals completion */
+ DPRINTK("probe begin\n");
+ for (i = 0; i < count; i++) {
+ struct ata_port *ap;
+ int rc;
+
+ ap = host_set->ports[i];
+
+ DPRINTK("ata%u: probe begin\n", ap->id);
+ up(&ap->sem); /* start probe */
+
+ DPRINTK("ata%u: probe-wait begin\n", ap->id);
+ down(&ap->probe_sem); /* wait for end */
+
+ DPRINTK("ata%u: probe-wait end\n", ap->id);
+
+ rc = scsi_add_host(ap->host, &pdev->dev);
+ if (rc) {
+ printk(KERN_ERR "ata%u: scsi_add_host failed\n",
+ ap->id);
+ /* FIXME: do something useful here */
+ /* FIXME: handle unconditional calls to
+ * scsi_scan_host and ata_host_remove, below,
+ * at the very least
+ */
+ }
+ }
+
+ /* probes are done, now scan each port's disk(s) */
+ DPRINTK("probe begin\n");
+ for (i = 0; i < count; i++) {
+ struct ata_port *ap = host_set->ports[i];
+
+ scsi_scan_host(ap->host);
+ }
+
+ pci_set_drvdata(pdev, host_set);
+
+ VPRINTK("EXIT, returning %u\n", ent->n_ports);
+ return ent->n_ports; /* success */
+
+err_out:
+ /* NOTE(review): ata_host_remove(..., 1) unregisters hosts that
+ * may never have been scsi_add_host()ed on this path -- confirm */
+ for (i = 0; i < count; i++) {
+ ata_host_remove(host_set->ports[i], 1);
+ scsi_host_put(host_set->ports[i]->host);
+ }
+ kfree(host_set);
+ VPRINTK("EXIT, returning 0\n");
+ return 0;
+}
+
+/**
+ * ata_scsi_release - SCSI layer callback hook for host unload
+ * @host: libata host to be unloaded
+ *
+ * Performs all duties necessary to shut down a libata port:
+ * Kill port kthread, disable port, and release resources.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer.
+ *
+ * RETURNS:
+ * One.
+ */
+
+int ata_scsi_release(struct Scsi_Host *host)
+{
+ /* the ata_port is embedded in the Scsi_Host's hostdata */
+ struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+
+ DPRINTK("ENTER\n");
+
+ ap->ops->port_disable(ap);
+ ata_host_remove(ap, 0);
+
+ DPRINTK("EXIT\n");
+ return 1;
+}
+
+/**
+ * ata_std_ports - initialize ioaddr with standard port offsets.
+ * @ioaddr: IO address structure to be initialized; cmd_addr must
+ * already be set, all other taskfile registers are derived
+ * from it at the standard ATA offsets.
+ */
+void ata_std_ports(struct ata_ioports *ioaddr)
+{
+ ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+ ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+ ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+ ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+ ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+ ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+ ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+ ioaddr->cmdstat_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
+
+/**
+ * ata_pci_init_one - generic PCI init helper for ATA host controllers
+ * @pdev: PCI device to be initialized
+ * @port_info: per-port driver information (port 1 falls back to
+ * port_info[0] when n_ports == 1)
+ * @n_ports: number of ports described by @port_info
+ *
+ * Detects legacy vs. native mode, claims PCI and legacy IDE
+ * resources, builds one (native) or two (legacy) probe entries,
+ * and registers the resulting ports via ata_device_add().
+ *
+ * legacy_mode bit encoding: bit 3 = controller is in legacy mode,
+ * bit 0 = primary (0x1f0) claimed, bit 1 = secondary (0x170) claimed.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno on failure.
+ */
+
+int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+ unsigned int n_ports)
+{
+ struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
+ struct ata_port_info *port0, *port1;
+ u8 tmp8, mask;
+ unsigned int legacy_mode = 0;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ port0 = port_info[0];
+ if (n_ports > 1)
+ port1 = port_info[1];
+ else
+ port1 = port0;
+
+ if ((port0->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
+ /* TODO: support transitioning to native mode? */
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+ mask = (1 << 2) | (1 << 0);
+ if ((tmp8 & mask) != mask)
+ legacy_mode = (1 << 3);
+ }
+
+ /* FIXME... */
+ if ((!legacy_mode) && (n_ports > 1)) {
+ printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
+ return -EINVAL;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+
+ if (legacy_mode) {
+ /* claim the primary legacy port; if it is busy, accept it
+ * only when the current owner is also "libata" */
+ if (!request_region(0x1f0, 8, "libata")) {
+ struct resource *conflict, res;
+ res.start = 0x1f0;
+ res.end = 0x1f0 + 8 - 1;
+ conflict = ____request_resource(&ioport_resource, &res);
+ if (!strcmp(conflict->name, "libata"))
+ legacy_mode |= (1 << 0);
+ else
+ printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+ } else
+ legacy_mode |= (1 << 0);
+
+ if (!request_region(0x170, 8, "libata")) {
+ struct resource *conflict, res;
+ res.start = 0x170;
+ res.end = 0x170 + 8 - 1;
+ conflict = ____request_resource(&ioport_resource, &res);
+ if (!strcmp(conflict->name, "libata"))
+ legacy_mode |= (1 << 1);
+ else
+ printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+ } else
+ legacy_mode |= (1 << 1);
+ }
+
+ /* we have legacy mode, but all ports are unavailable */
+ if (legacy_mode == (1 << 3)) {
+ rc = -EBUSY;
+ goto err_out_regions;
+ }
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ goto err_out_regions;
+
+ probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (!probe_ent) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ memset(probe_ent, 0, sizeof(*probe_ent));
+ probe_ent->pdev = pdev;
+ INIT_LIST_HEAD(&probe_ent->node);
+
+ if (legacy_mode) {
+ probe_ent2 = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (!probe_ent2) {
+ rc = -ENOMEM;
+ goto err_out_free_ent;
+ }
+
+ memset(probe_ent2, 0, sizeof(*probe_ent));
+ probe_ent2->pdev = pdev;
+ INIT_LIST_HEAD(&probe_ent2->node);
+ }
+
+ /* BAR 4 is the bus-master DMA register block */
+ probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
+ probe_ent->sht = port0->sht;
+ probe_ent->host_flags = port0->host_flags;
+ probe_ent->pio_mask = port0->pio_mask;
+ probe_ent->udma_mask = port0->udma_mask;
+ probe_ent->port_ops = port0->port_ops;
+
+ if (legacy_mode) {
+ /* fixed legacy addresses/IRQs: one probe entry per channel */
+ probe_ent->port[0].cmd_addr = 0x1f0;
+ probe_ent->port[0].ctl_addr = 0x3f6;
+ probe_ent->n_ports = 1;
+ probe_ent->irq = 14;
+ ata_std_ports(&probe_ent->port[0]);
+
+ probe_ent2->port[0].cmd_addr = 0x170;
+ probe_ent2->port[0].ctl_addr = 0x376;
+ probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
+ probe_ent2->n_ports = 1;
+ probe_ent2->irq = 15;
+ ata_std_ports(&probe_ent2->port[0]);
+
+ probe_ent2->sht = port1->sht;
+ probe_ent2->host_flags = port1->host_flags;
+ probe_ent2->pio_mask = port1->pio_mask;
+ probe_ent2->udma_mask = port1->udma_mask;
+ probe_ent2->port_ops = port1->port_ops;
+ } else {
+ /* native mode: both channels in one probe entry, addresses
+ * taken from PCI BARs 0-3, shared PCI IRQ */
+ probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
+ ata_std_ports(&probe_ent->port[0]);
+ probe_ent->port[0].ctl_addr =
+ pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+
+ probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
+ ata_std_ports(&probe_ent->port[1]);
+ probe_ent->port[1].ctl_addr =
+ pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+ probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+
+ probe_ent->n_ports = 2;
+ probe_ent->irq = pdev->irq;
+ probe_ent->irq_flags = SA_SHIRQ;
+ }
+
+ pci_set_master(pdev);
+
+ /* FIXME: check ata_device_add return */
+ if (legacy_mode) {
+ if (legacy_mode & (1 << 0))
+ ata_device_add(probe_ent);
+ if (legacy_mode & (1 << 1))
+ ata_device_add(probe_ent2);
+ kfree(probe_ent2);
+ } else {
+ ata_device_add(probe_ent);
+ assert(probe_ent2 == NULL);
+ }
+ /* probe entries are templates only; safe to free now */
+ kfree(probe_ent);
+
+ return 0;
+
+err_out_free_ent:
+ kfree(probe_ent);
+err_out_regions:
+ if (legacy_mode & (1 << 0))
+ release_region(0x1f0, 8);
+ if (legacy_mode & (1 << 1))
+ release_region(0x170, 8);
+ pci_release_regions(pdev);
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+/**
+ * ata_pci_remove_one - PCI layer callback for device removal
+ * @pdev: PCI device that was removed
+ *
+ * PCI layer indicates to libata via this hook that
+ * hot-unplug or module unload event has occured.
+ * Handle this by unregistering all objects associated
+ * with this PCI device. Free those objects. Then finally
+ * release PCI resources and disable device.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ */
+
+void ata_pci_remove_one (struct pci_dev *pdev)
+{
+ struct ata_host_set *host_set = pci_get_drvdata(pdev);
+ struct ata_port *ap;
+ unsigned int i;
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ ap = host_set->ports[i];
+
+ /* FIXME: check return val */
+ scsi_remove_host(ap->host);
+ }
+
+ free_irq(host_set->irq, host_set);
+ if (host_set->mmio_base)
+ iounmap(host_set->mmio_base);
+
+ /* Release legacy IDE port regions while ap->ioaddr is still
+ * valid; ap lives inside the Scsi_Host's hostdata and may be
+ * freed by scsi_host_put() below, and host_set itself must not
+ * be touched after kfree(). (Previously this loop ran after
+ * both, a use-after-free.)
+ */
+ for (i = 0; i < host_set->n_ports; i++) {
+ struct ata_ioports *ioaddr;
+
+ ap = host_set->ports[i];
+ ioaddr = &ap->ioaddr;
+
+ if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
+ if (ioaddr->cmd_addr == 0x1f0)
+ release_region(0x1f0, 8);
+ else if (ioaddr->cmd_addr == 0x170)
+ release_region(0x170, 8);
+ }
+ }
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ ap = host_set->ports[i];
+
+ ata_scsi_release(ap->host);
+ scsi_host_put(ap->host); /* FIXME: check return val */
+ }
+
+ /* all users of host_set are done; safe to free it now */
+ kfree(host_set);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* move to PCI subsystem */
+/**
+ * pci_test_config_bits - test masked bits in a PCI config register
+ * @pdev: PCI device to read from
+ * @bits: register offset, access width (1/2/4 bytes), mask and
+ * expected value
+ *
+ * RETURNS:
+ * 1 if (reg & mask) == val, 0 if not, -EINVAL on bad width.
+ */
+int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
+{
+ unsigned long tmp = 0;
+
+ switch (bits->width) {
+ case 1: {
+ u8 tmp8 = 0;
+ pci_read_config_byte(pdev, bits->reg, &tmp8);
+ tmp = tmp8;
+ break;
+ }
+ case 2: {
+ u16 tmp16 = 0;
+ pci_read_config_word(pdev, bits->reg, &tmp16);
+ tmp = tmp16;
+ break;
+ }
+ case 4: {
+ u32 tmp32 = 0;
+ pci_read_config_dword(pdev, bits->reg, &tmp32);
+ tmp = tmp32;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ tmp &= bits->mask;
+
+ return (tmp == bits->val) ? 1 : 0;
+}
+
+/**
+ * ata_init - libata module init; prints the version banner only,
+ * low-level drivers register themselves separately.
+ *
+ * LOCKING:
+ * None (module init context).
+ *
+ * RETURNS:
+ * Zero (always succeeds).
+ */
+
+static int __init ata_init(void)
+{
+ printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+ return 0;
+}
+
+module_init(ata_init);
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers. As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_tf_load_pio);
+EXPORT_SYMBOL_GPL(ata_tf_load_mmio);
+EXPORT_SYMBOL_GPL(ata_tf_read_pio);
+EXPORT_SYMBOL_GPL(ata_tf_read_mmio);
+EXPORT_SYMBOL_GPL(ata_check_status_pio);
+EXPORT_SYMBOL_GPL(ata_check_status_mmio);
+EXPORT_SYMBOL_GPL(ata_exec_command_pio);
+EXPORT_SYMBOL_GPL(ata_exec_command_mmio);
+EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_fill_sg);
+EXPORT_SYMBOL_GPL(ata_bmdma_start_pio);
+EXPORT_SYMBOL_GPL(ata_bmdma_start_mmio);
+EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(sata_phy_reset);
+EXPORT_SYMBOL_GPL(pata_phy_config);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_pci_init_one);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_release);
+
diff -Nru a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/libata-scsi.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,1115 @@
+/*
+ libata-scsi.c - helper library for ATA
+
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+
+#include "libata.h"
+
+/**
+ * ata_scsi_qc_new - acquire a queued command for a SCSI request
+ * @ap: port the command will execute on
+ * @dev: target ATA device
+ * @cmd: SCSI command to be translated
+ * @done: SCSI-layer completion callback
+ *
+ * Wraps ata_qc_new_init() and attaches the SCSI command and its
+ * data buffer (scatter-gather list or single segment). On
+ * allocation failure, completes @cmd with QUEUE_FULL.
+ *
+ * RETURNS:
+ * New queued command, or NULL if none could be allocated.
+ */
+struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
+ struct ata_device *dev,
+ Scsi_Cmnd *cmd,
+ void (*done)(Scsi_Cmnd *))
+{
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_new_init(ap, dev);
+ if (qc) {
+ qc->scsicmd = cmd;
+ qc->scsidone = done;
+
+ if (cmd->use_sg) {
+ qc->sg = (struct scatterlist *) cmd->request_buffer;
+ qc->n_elem = cmd->use_sg;
+ } else {
+ /* single buffer: use the embedded one-entry sg */
+ qc->sg = &qc->sgent;
+ qc->n_elem = 1;
+ }
+ } else {
+ cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+ done(cmd);
+ }
+
+ return qc;
+}
+
+/**
+ * ata_to_sense_error - fill in SCSI sense data for a failed command
+ * @qc: failed queued command; its scsicmd receives CHECK CONDITION
+ * status and a fixed-format MEDIUM ERROR sense buffer, with the
+ * ASC/ASCQ chosen by the command's read/write direction.
+ *
+ * LOCKING:
+ * None (only touches the command's own sense buffer).
+ */
+
+void ata_to_sense_error(struct ata_queued_cmd *qc)
+{
+ Scsi_Cmnd *cmd = qc->scsicmd;
+
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = MEDIUM_ERROR;
+ cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
+
+ /* additional-sense-code[-qualifier] */
+ if ((qc->flags & ATA_QCFLAG_WRITE) == 0) {
+ cmd->sense_buffer[12] = 0x11; /* "unrecovered read error" */
+ cmd->sense_buffer[13] = 0x04;
+ } else {
+ cmd->sense_buffer[12] = 0x0C; /* "write error - */
+ cmd->sense_buffer[13] = 0x02; /* auto-reallocation failed" */
+ }
+}
+
+/**
+ * ata_scsi_slave_config - configure a newly attached SCSI device
+ * @sdev: device being configured
+ *
+ * Forces 10-byte READ/WRITE and MODE SENSE CDBs, since those are
+ * the forms the simulator and translation layer handle.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer.
+ */
+
+int ata_scsi_slave_config(struct scsi_device *sdev)
+{
+ sdev->use_10_for_rw = 1;
+ sdev->use_10_for_ms = 1;
+
+ return 0; /* scsi layer doesn't check return value, sigh */
+}
+
+/**
+ * ata_scsi_error - SCSI layer error handler callback
+ * @host: SCSI host on which error occurred
+ *
+ * Handles SCSI-layer-thrown error events by delegating to the
+ * port's eng_timeout operation.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ *
+ * RETURNS:
+ * Zero.
+ */
+
+int ata_scsi_error(struct Scsi_Host *host)
+{
+ struct ata_port *ap;
+
+ DPRINTK("ENTER\n");
+
+ ap = (struct ata_port *) &host->hostdata[0];
+ ap->ops->eng_timeout(ap);
+
+ DPRINTK("EXIT\n");
+ return 0;
+}
+
+/**
+ * ata_scsi_rw_xlat - translate a SCSI READ/WRITE CDB to an ATA taskfile
+ * @qc: queued command to populate
+ * @scsicmd: the SCSI CDB (6, 10 or 16 bytes)
+ * @cmd_size: CDB length in bytes
+ *
+ * Chooses the ATA command/protocol from the transfer direction,
+ * DMA capability and LBA48 support, then decodes the LBA and
+ * sector count from the CDB into the taskfile registers.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero on success, one if the CDB is unsupported or addresses
+ * beyond the device's (28-bit) capability.
+ */
+
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd,
+ unsigned int cmd_size)
+{
+ struct ata_taskfile *tf = &qc->tf;
+ unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
+ unsigned int dma = qc->flags & ATA_QCFLAG_DMA;
+
+ qc->cursect = qc->cursg = qc->cursg_ofs = 0;
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->hob_nsect = 0;
+ tf->hob_lbal = 0;
+ tf->hob_lbam = 0;
+ tf->hob_lbah = 0;
+
+ if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
+ scsicmd[0] == READ_16) {
+ if (likely(dma)) {
+ if (lba48)
+ tf->command = ATA_CMD_READ_EXT;
+ else
+ tf->command = ATA_CMD_READ;
+ tf->protocol = ATA_PROT_DMA_READ;
+ } else {
+ if (lba48)
+ tf->command = ATA_CMD_PIO_READ_EXT;
+ else
+ tf->command = ATA_CMD_PIO_READ;
+ tf->protocol = ATA_PROT_PIO_READ;
+ }
+ qc->flags &= ~ATA_QCFLAG_WRITE;
+ VPRINTK("reading\n");
+ } else {
+ if (likely(dma)) {
+ if (lba48)
+ tf->command = ATA_CMD_WRITE_EXT;
+ else
+ tf->command = ATA_CMD_WRITE;
+ tf->protocol = ATA_PROT_DMA_WRITE;
+ } else {
+ if (lba48)
+ tf->command = ATA_CMD_PIO_WRITE_EXT;
+ else
+ tf->command = ATA_CMD_PIO_WRITE;
+ tf->protocol = ATA_PROT_PIO_WRITE;
+ }
+ qc->flags |= ATA_QCFLAG_WRITE;
+ VPRINTK("writing\n");
+ }
+
+ if (cmd_size == 10) {
+ /* 10-byte CDB: LBA in bytes 2-5, count in 7-8 */
+ if (lba48) {
+ tf->hob_nsect = scsicmd[7];
+ tf->hob_lbal = scsicmd[2];
+
+ qc->nsect = ((unsigned int)scsicmd[7] << 8) |
+ scsicmd[8];
+ } else {
+ /* if we don't support LBA48 addressing, the request
+ * -may- be too large. */
+ if ((scsicmd[2] & 0xf0) || scsicmd[7])
+ return 1;
+
+ /* stores LBA27:24 in lower 4 bits of device reg */
+ tf->device |= scsicmd[2];
+
+ qc->nsect = scsicmd[8];
+ }
+ tf->device |= ATA_LBA;
+
+ tf->nsect = scsicmd[8];
+ tf->lbal = scsicmd[5];
+ tf->lbam = scsicmd[4];
+ tf->lbah = scsicmd[3];
+
+ VPRINTK("ten-byte command\n");
+ return 0;
+ }
+
+ if (cmd_size == 6) {
+ /* 6-byte CDB: 21-bit LBA in bytes 1-3, count in byte 4 */
+ qc->nsect = tf->nsect = scsicmd[4];
+ tf->lbal = scsicmd[3];
+ tf->lbam = scsicmd[2];
+ tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
+
+ VPRINTK("six-byte command\n");
+ return 0;
+ }
+
+ if (cmd_size == 16) {
+ /* 16-byte CDB: 64-bit LBA in bytes 2-9, count in 10-13 */
+ /* rule out impossible LBAs and sector counts */
+ if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
+ return 1;
+
+ if (lba48) {
+ tf->hob_nsect = scsicmd[12];
+ tf->hob_lbal = scsicmd[6];
+ tf->hob_lbam = scsicmd[5];
+ tf->hob_lbah = scsicmd[4];
+
+ qc->nsect = ((unsigned int)scsicmd[12] << 8) |
+ scsicmd[13];
+ } else {
+ /* once again, filter out impossible non-zero values */
+ if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
+ (scsicmd[6] & 0xf0))
+ return 1;
+
+ /* stores LBA27:24 in lower 4 bits of device reg;
+ * in a 16-byte CDB those bits live in byte 6, not
+ * byte 2 (byte 2 was verified zero above) */
+ tf->device |= scsicmd[6];
+
+ qc->nsect = scsicmd[13];
+ }
+ tf->device |= ATA_LBA;
+
+ tf->nsect = scsicmd[13];
+ tf->lbal = scsicmd[9];
+ tf->lbam = scsicmd[8];
+ tf->lbah = scsicmd[7];
+
+ VPRINTK("sixteen-byte command\n");
+ return 0;
+ }
+
+ DPRINTK("no-byte command\n");
+ return 1;
+}
+
+/**
+ * ata_scsi_rw_queue - queue a SCSI READ/WRITE command for execution
+ * @ap: port the command will execute on
+ * @dev: target ATA device
+ * @cmd: SCSI command to translate and issue
+ * @done: SCSI-layer completion callback
+ * @cmd_size: CDB length (6, 10 or 16 bytes)
+ *
+ * Allocates a queued command, translates the CDB via
+ * ata_scsi_rw_xlat(), and issues it to hardware; on any
+ * translation or issue failure, completes @cmd as a bad CDB.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev,
+ Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *),
+ unsigned int cmd_size)
+{
+ struct ata_queued_cmd *qc;
+ u8 *scsicmd = cmd->cmnd;
+
+ VPRINTK("ENTER\n");
+
+ if (unlikely(cmd->request_bufflen < 1)) {
+ printk(KERN_WARNING "ata%u(%u): empty request buffer\n",
+ ap->id, dev->devno);
+ goto err_out;
+ }
+
+ qc = ata_scsi_qc_new(ap, dev, cmd, done);
+ if (!qc)
+ return;
+
+ qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */
+
+ if (ata_scsi_rw_xlat(qc, scsicmd, cmd_size))
+ goto err_out;
+
+ /* select device, send command to hardware */
+ if (ata_qc_issue(qc))
+ goto err_out;
+
+ VPRINTK("EXIT\n");
+ return;
+
+err_out:
+ ata_bad_cdb(cmd, done);
+ DPRINTK("EXIT - badcmd\n");
+}
+
+/**
+ * ata_scsi_rbuf_get - Map response buffer.
+ * @cmd: SCSI command containing buffer to be mapped.
+ * @buf_out: Pointer to mapped area.
+ *
+ * Maps buffer contained within SCSI command @cmd, and zeroes it
+ * before returning. Pair with ata_scsi_rbuf_put() to unmap.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ * FIXME: kmap inside spin_lock_irqsave ok?
+ *
+ * RETURNS:
+ * Length of response buffer.
+ */
+
+static unsigned int ata_scsi_rbuf_get(Scsi_Cmnd *cmd, u8 **buf_out)
+{
+ u8 *buf;
+ unsigned int buflen;
+
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ /* only the first sg entry is used for simulated replies */
+ sg = (struct scatterlist *) cmd->request_buffer;
+ buf = kmap(sg->page) + sg->offset;
+ buflen = sg->length;
+ } else {
+ buf = cmd->request_buffer;
+ buflen = cmd->request_bufflen;
+ }
+
+ memset(buf, 0, buflen);
+ *buf_out = buf;
+ return buflen;
+}
+
+/**
+ * ata_scsi_rbuf_put - Unmap response buffer.
+ * @cmd: SCSI command containing buffer to be unmapped.
+ *
+ * Unmaps response buffer contained within @cmd; no-op for
+ * non-scatter-gather commands (nothing was kmapped).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static inline void ata_scsi_rbuf_put(Scsi_Cmnd *cmd)
+{
+ if (cmd->use_sg) {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *) cmd->request_buffer;
+ kunmap(sg->page);
+ }
+}
+
+/**
+ * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
+ * @args: Port / device / SCSI command of interest.
+ * @actor: Callback hook for desired SCSI command simulator
+ *
+ * Takes care of the hard work of simulating a SCSI command...
+ * Mapping the response buffer, calling the command's handler,
+ * and handling the handler's return value. This return value
+ * indicates whether the handler wishes the SCSI command to be
+ * completed successfully (zero), or not (non-zero, completed
+ * as a bad CDB).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+ unsigned int (*actor) (struct ata_scsi_args *args,
+ u8 *rbuf, unsigned int buflen))
+{
+ u8 *rbuf;
+ unsigned int buflen, rc;
+ Scsi_Cmnd *cmd = args->cmd;
+
+ buflen = ata_scsi_rbuf_get(cmd, &rbuf);
+ rc = actor(args, rbuf, buflen);
+ ata_scsi_rbuf_put(cmd);
+
+ if (rc)
+ ata_bad_cdb(cmd, args->done);
+ else {
+ cmd->result = SAM_STAT_GOOD;
+ args->done(cmd);
+ }
+}
+
+/**
+ * ata_scsiop_inq_std - Simulate INQUIRY command
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Returns standard device identification data associated
+ * with non-EVPD INQUIRY command output. Vendor/product strings
+ * and version descriptors are only filled if @buflen allows.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ /* bytes 0-4 of standard INQUIRY data */
+ const u8 hdr[] = {
+ TYPE_DISK,
+ 0,
+ 0x5, /* claim SPC-3 version compatibility */
+ 2,
+ 96 - 4
+ };
+
+ VPRINTK("ENTER\n");
+
+ memcpy(rbuf, hdr, sizeof(hdr));
+
+ if (buflen > 36) {
+ memcpy(&rbuf[8], args->dev->vendor, 8);
+ memcpy(&rbuf[16], args->dev->product, 16);
+ memcpy(&rbuf[32], DRV_VERSION, 4);
+ }
+
+ if (buflen > 63) {
+ const u8 versions[] = {
+ 0x60, /* SAM-3 (no version claimed) */
+
+ 0x03,
+ 0x20, /* SBC-2 (no version claimed) */
+
+ 0x02,
+ 0x60 /* SPC-3 (no version claimed) */
+ };
+
+ memcpy(rbuf + 59, versions, sizeof(versions));
+ }
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_00 - Simulate INQUIRY EVPD page 0, list of pages
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Returns list of inquiry EVPD pages available.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ const u8 pages[] = {
+ 0x00, /* page 0x00, this page */
+ 0x80, /* page 0x80, unit serial no page */
+ 0x83 /* page 0x83, device ident page */
+ };
+ rbuf[3] = sizeof(pages); /* number of supported EVPD pages */
+
+ if (buflen > 6)
+ memcpy(rbuf + 4, pages, sizeof(pages));
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_80 - Simulate INQUIRY EVPD page 80, device serial number
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Returns ATA device serial number, copied from the device's
+ * IDENTIFY data.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ const u8 hdr[] = {
+ 0,
+ 0x80, /* this page code */
+ 0,
+ ATA_SERNO_LEN, /* page len */
+ };
+ memcpy(rbuf, hdr, sizeof(hdr));
+
+ if (buflen > (ATA_SERNO_LEN + 4))
+ ata_dev_id_string(args->dev, (unsigned char *) &rbuf[4],
+ ATA_ID_SERNO_OFS, ATA_SERNO_LEN);
+
+ return 0;
+}
+
+/* fixed identification string returned by EVPD page 0x83 */
+static const char *inq_83_str = "Linux ATA-SCSI simulator";
+
+/**
+ * ata_scsiop_inq_83 - Simulate INQUIRY EVPD page 83, device identity
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Returns device identification. Currently hardcoded to
+ * return "Linux ATA-SCSI simulator".
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ rbuf[1] = 0x83; /* this page code */
+ rbuf[3] = 4 + strlen(inq_83_str); /* page len */
+
+ /* our one and only identification descriptor (vendor-specific) */
+ if (buflen > (strlen(inq_83_str) + 4 + 4)) {
+ rbuf[4 + 0] = 2; /* code set: ASCII */
+ rbuf[4 + 3] = strlen(inq_83_str);
+ memcpy(rbuf + 4 + 4, inq_83_str, strlen(inq_83_str));
+ }
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_noop - command handler that succeeds without output
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * No operation. Simply returns success to caller, to indicate
+ * that the caller should successfully complete this SCSI command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ VPRINTK("ENTER\n");
+ return 0;
+}
+
+/**
+ * ata_scsiop_sync_cache - Simulate SYNCHRONIZE CACHE command
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Initiates flush of device's cache. Currently unimplemented:
+ * always reports failure (returns 1 -> bad CDB) to the caller.
+ *
+ * TODO:
+ * Actually do this :)
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ VPRINTK("ENTER\n");
+
+ /* FIXME */
+ return 1;
+}
+
+/**
+ * ata_msense_push - Push data onto MODE SENSE data output buffer
+ * @ptr_io: (input/output) Location to store more output data
+ * @last: End of output data buffer
+ * @buf: Pointer to BLOB being added to output buffer
+ * @buflen: Length of BLOB
+ *
+ * Store MODE SENSE data on an output buffer. If the BLOB does
+ * not fit before @last, it is silently dropped and *@ptr_io is
+ * left unchanged.
+ *
+ * LOCKING:
+ * None.
+ */
+
+static void ata_msense_push(u8 **ptr_io, const u8 *last,
+ const u8 *buf, unsigned int buflen)
+{
+ u8 *ptr = *ptr_io;
+
+ if ((ptr + buflen - 1) > last)
+ return;
+
+ memcpy(ptr, buf, buflen);
+
+ ptr += buflen;
+
+ *ptr_io = ptr;
+}
+
+/**
+ * ata_msense_caching - Simulate MODE SENSE caching info page
+ * @dev: device whose write-cache capability is reported
+ * @ptr_io: (input/output) location to append the page
+ * @last: end of output buffer
+ *
+ * Generate a caching info page, which conditionally indicates
+ * write caching to the SCSI layer, depending on device
+ * capabilities.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Length of the generated page (even if it did not fit).
+ */
+
+static unsigned int ata_msense_caching(struct ata_device *dev, u8 **ptr_io,
+ const u8 *last)
+{
+ u8 page[7] = { 0xf, 0, 0x10, 0, 0x8, 0xa, 0 };
+ if (dev->flags & ATA_DFLAG_WCACHE)
+ page[6] = 0x4; /* WCE bit: write cache enabled */
+
+ ata_msense_push(ptr_io, last, page, sizeof(page));
+ return sizeof(page);
+}
+
+/**
+ * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ * @ptr_io: (input/output) location to append the page
+ * @last: end of output buffer
+ *
+ * Generate a generic MODE SENSE control mode page.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Length of the generated page (even if it did not fit).
+ */
+
+static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
+{
+ const u8 page[] = {0xa, 0xa, 2, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30};
+
+ ata_msense_push(ptr_io, last, page, sizeof(page));
+ return sizeof(page);
+}
+
+/**
+ * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Simulate MODE SENSE commands. Supports page codes 0x08
+ * (caching), 0x0a (control mode) and 0x3f (all pages); any other
+ * page code, or an unsupported page control, is rejected.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ u8 *scsicmd = args->cmd->cmnd, *p, *last;
+ struct ata_device *dev = args->dev;
+ unsigned int page_control, six_byte, output_len;
+
+ VPRINTK("ENTER\n");
+
+ six_byte = (scsicmd[0] == MODE_SENSE);
+
+ /* we only support saved and current values (which we treat
+ * in the same manner)
+ */
+ page_control = scsicmd[2] >> 6;
+ if ((page_control != 0) && (page_control != 3))
+ return 1;
+
+ /* header length: 4 bytes for MODE SENSE(6), 8 for (10) */
+ if (six_byte)
+ output_len = 4;
+ else
+ output_len = 8;
+
+ p = rbuf + output_len;
+ last = rbuf + buflen - 1;
+
+ switch(scsicmd[2] & 0x3f) {
+ case 0x08: /* caching */
+ output_len += ata_msense_caching(dev, &p, last);
+ break;
+
+ case 0x0a: { /* control mode */
+ output_len += ata_msense_ctl_mode(&p, last);
+ break;
+ }
+
+ case 0x3f: /* all pages */
+ output_len += ata_msense_caching(dev, &p, last);
+ output_len += ata_msense_ctl_mode(&p, last);
+ break;
+
+ default: /* invalid page code */
+ return 1;
+ }
+
+ /* mode data length field excludes itself (1 byte for the 6-byte
+ * form, 2 bytes for the 10-byte form) */
+ if (six_byte) {
+ output_len--;
+ rbuf[0] = output_len;
+ } else {
+ output_len -= 2;
+ rbuf[0] = output_len >> 8;
+ rbuf[1] = output_len;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Simulate READ CAPACITY commands.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ u64 n_sectors = args->dev->n_sectors;
+ u32 tmp;
+
+ VPRINTK("ENTER\n");
+
+ n_sectors--; /* convert total-sector count to highest addressable LBA */
+
+ tmp = n_sectors; /* note: truncates, if lba48 */
+ if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ rbuf[0] = tmp >> (8 * 3);
+ rbuf[1] = tmp >> (8 * 2);
+ rbuf[2] = tmp >> (8 * 1);
+ rbuf[3] = tmp;
+
+ tmp = ATA_SECT_SIZE;
+ rbuf[6] = tmp >> 8;
+ rbuf[7] = tmp;
+
+ } else { /* READ CAPACITY(16) -- NOTE(review): SBC places the 8-byte LBA at bytes 0-7 and block length at bytes 8-11; the 2-9/12-13 offsets below look shifted, verify against spec */
+ rbuf[2] = n_sectors >> (8 * 7);
+ rbuf[3] = n_sectors >> (8 * 6);
+ rbuf[4] = n_sectors >> (8 * 5);
+ rbuf[5] = n_sectors >> (8 * 4);
+ rbuf[6] = tmp >> (8 * 3);
+ rbuf[7] = tmp >> (8 * 2);
+ rbuf[8] = tmp >> (8 * 1);
+ rbuf[9] = tmp;
+
+ tmp = ATA_SECT_SIZE;
+ rbuf[12] = tmp >> 8;
+ rbuf[13] = tmp;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_report_luns - Simulate REPORT LUNS command
+ * @args: Port / device / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ * @buflen: Response buffer length.
+ *
+ * Simulate REPORT LUNS command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen)
+{
+ VPRINTK("ENTER\n");
+ rbuf[3] = 8; /* LUN LIST LENGTH: one 8-byte entry (LUN 0, left zeroed) */
+
+ return 0;
+}
+
+/**
+ * ata_scsi_badcmd - Complete a SCSI command with CHECK CONDITION
+ * @cmd: SCSI command to be completed with an error
+ * @done: SCSI command completion function
+ * @asc: SCSI additional sense code to place in the sense data
+ * @ascq: SCSI additional sense code qualifier
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+void ata_scsi_badcmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *), u8 asc, u8 ascq)
+{
+ DPRINTK("ENTER\n");
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+
+ cmd->sense_buffer[0] = 0x70; /* current-error, fixed-format sense data */
+ cmd->sense_buffer[2] = ILLEGAL_REQUEST; /* sense key */
+ cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */
+ cmd->sense_buffer[12] = asc;
+ cmd->sense_buffer[13] = ascq;
+
+ done(cmd);
+}
+
+/**
+ * atapi_scsi_queuecmd - Send CDB to ATAPI device
+ * @ap: Port to which ATAPI device is attached.
+ * @dev: Target device for CDB.
+ * @cmd: SCSI command being sent to device.
+ * @done: SCSI command completion function.
+ *
+ * Sends CDB to ATAPI device. If the Linux SCSI layer sends a
+ * non-data command, then this function handles the command
+ * directly, via polling. Otherwise, the bmdma engine is started.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+
+static void atapi_scsi_queuecmd(struct ata_port *ap, struct ata_device *dev,
+ Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+ struct ata_queued_cmd *qc;
+ u8 *scsicmd = cmd->cmnd, status;
+ unsigned int doing_dma = 0;
+
+ VPRINTK("ENTER, drv_stat = 0x%x\n", ata_chk_status(ap));
+
+ if (cmd->sc_data_direction == SCSI_DATA_UNKNOWN) {
+ DPRINTK("unknown data, scsicmd 0x%x\n", scsicmd[0]);
+ ata_bad_cdb(cmd, done);
+ return;
+ }
+
+ switch(scsicmd[0]) {
+ case READ_6:
+ case WRITE_6:
+ case MODE_SELECT:
+ case MODE_SENSE:
+ DPRINTK("read6/write6/modesel/modesense trap\n");
+ ata_bad_scsiop(cmd, done);
+ return;
+
+ default:
+ /* do nothing */
+ break;
+ }
+
+ qc = ata_scsi_qc_new(ap, dev, cmd, done);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: command queue empty\n", ap->id);
+ return;
+ }
+
+ qc->flags |= ATA_QCFLAG_ATAPI; /* mark command as carrying an ATAPI CDB */
+
+ qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ if (cmd->sc_data_direction == SCSI_DATA_WRITE) {
+ qc->flags |= ATA_QCFLAG_WRITE;
+ DPRINTK("direction: write\n");
+ }
+
+ qc->tf.command = ATA_CMD_PACKET;
+
+ /* set up SG table */
+ if (cmd->sc_data_direction == SCSI_DATA_NONE) {
+ ap->active_tag = qc->tag;
+ qc->flags |= ATA_QCFLAG_ACTIVE | ATA_QCFLAG_POLL;
+ qc->tf.protocol = ATA_PROT_ATAPI;
+
+ ata_dev_select(ap, dev->devno, 1, 0);
+
+ DPRINTK("direction: none\n");
+ qc->tf.ctl |= ATA_NIEN; /* disable interrupts */
+ ata_tf_to_host_nolock(ap, &qc->tf);
+ } else {
+ qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */
+ qc->tf.feature = ATAPI_PKT_DMA;
+ qc->tf.protocol = ATA_PROT_ATAPI_DMA;
+
+ doing_dma = 1;
+
+ /* select device, send command to hardware */
+ if (ata_qc_issue(qc))
+ goto err_out;
+ }
+
+ status = ata_busy_wait(ap, ATA_BUSY, 1000); /* wait for BSY to clear before writing CDB */
+ if (status & ATA_BUSY) {
+ ata_thread_wake(ap, THR_PACKET);
+ return;
+ }
+ if ((status & ATA_DRQ) == 0) /* no DRQ: device did not request the CDB */
+ goto err_out;
+
+ /* FIXME: mmio-ize */
+ DPRINTK("writing cdb\n");
+ outsl(ap->ioaddr.data_addr, scsicmd, ap->host->max_cmd_len / 4);
+
+ if (!doing_dma)
+ ata_thread_wake(ap, THR_PACKET);
+
+ VPRINTK("EXIT\n");
+ return;
+
+err_out:
+ if (!doing_dma)
+ ata_irq_on(ap); /* re-enable interrupts */
+ ata_bad_cdb(cmd, done);
+ DPRINTK("EXIT - badcmd\n");
+}
+
+/**
+ * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
+ * @cmd: SCSI command to be sent
+ * @done: Completion function, called when command is complete
+ *
+ * In some cases, this function translates SCSI commands into
+ * ATA taskfiles, and queues the taskfiles to be sent to
+ * hardware. In other cases, this function simulates a
+ * SCSI device by evaluating and responding to certain
+ * SCSI commands. This creates the overall effect of
+ * ATA and ATAPI devices appearing as SCSI devices.
+ *
+ * LOCKING:
+ * Releases scsi-layer-held lock, and obtains host_set lock.
+ *
+ * RETURNS:
+ * Zero.
+ */
+
+int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+ u8 *scsicmd = cmd->cmnd;
+ struct ata_port *ap;
+ struct ata_device *dev;
+ struct ata_scsi_args args;
+ const unsigned int atapi_support =
+#ifdef ATA_ENABLE_ATAPI
+ 1;
+#else
+ 0;
+#endif
+
+ /* Note: spin_lock_irqsave is held by caller... */
+ spin_unlock(cmd->device->host->host_lock);
+
+ ap = (struct ata_port *) &cmd->device->host->hostdata[0];
+
+ DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ap->id,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
+ scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
+ scsicmd[8]);
+
+ /* skip commands not addressed to targets we care about */
+ if ((cmd->device->channel != 0) || (cmd->device->lun != 0) ||
+ (cmd->device->id >= ATA_MAX_DEVICES)) {
+ cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */
+ done(cmd);
+ goto out;
+ }
+
+ spin_lock(&ap->host_set->lock);
+
+ dev = &ap->device[cmd->device->id];
+
+ if (!ata_dev_present(dev)) {
+ DPRINTK("no device\n");
+ cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */
+ done(cmd);
+ goto out_unlock;
+ }
+
+ if (dev->class == ATA_DEV_ATAPI) {
+ if (atapi_support)
+ atapi_scsi_queuecmd(ap, dev, cmd, done);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16); /* correct? */
+ done(cmd);
+ }
+ goto out_unlock;
+ }
+
+ /* fast path: translate R/W commands straight to ATA taskfiles */
+ switch(scsicmd[0]) {
+ case READ_6:
+ case WRITE_6:
+ ata_scsi_rw_queue(ap, dev, cmd, done, 6);
+ goto out_unlock;
+
+ case READ_10:
+ case WRITE_10:
+ ata_scsi_rw_queue(ap, dev, cmd, done, 10);
+ goto out_unlock;
+
+ case READ_16:
+ case WRITE_16:
+ ata_scsi_rw_queue(ap, dev, cmd, done, 16);
+ goto out_unlock;
+
+ default:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * slow path: simulate the remaining SCSI commands in software
+ */
+
+ args.ap = ap;
+ args.dev = dev;
+ args.cmd = cmd;
+ args.done = done;
+
+ switch(scsicmd[0]) {
+ case TEST_UNIT_READY: /* FIXME: correct? */
+ case FORMAT_UNIT: /* FIXME: correct? */
+ case SEND_DIAGNOSTIC: /* FIXME: correct? */
+ ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
+ break;
+
+ case INQUIRY:
+ if (scsicmd[1] & 2) /* is CmdDt set? */
+ ata_bad_cdb(cmd, done);
+ else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
+ else if (scsicmd[2] == 0x00)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
+ else if (scsicmd[2] == 0x80)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
+ else if (scsicmd[2] == 0x83)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
+ else
+ ata_bad_cdb(cmd, done);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ break;
+
+ case MODE_SELECT: /* unconditionally return */
+ case MODE_SELECT_10: /* bad-field-in-cdb */
+ ata_bad_cdb(cmd, done);
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ if ((dev->flags & ATA_DFLAG_WCACHE) == 0)
+ ata_bad_scsiop(cmd, done);
+ else
+ ata_scsi_rbuf_fill(&args, ata_scsiop_sync_cache);
+ break;
+
+ case READ_CAPACITY:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+ break;
+
+ case SERVICE_ACTION_IN:
+ if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+ else
+ ata_bad_cdb(cmd, done);
+ break;
+
+ case REPORT_LUNS:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ break;
+
+ /* mandatory commands we haven't implemented yet */
+ case REQUEST_SENSE:
+
+ /* all other commands */
+ default:
+ ata_bad_scsiop(cmd, done);
+ break;
+ }
+
+out_unlock:
+ spin_unlock(&ap->host_set->lock);
+out:
+ spin_lock(cmd->device->host->host_lock);
+ return 0;
+}
+
diff -Nru a/drivers/scsi/libata.h b/drivers/scsi/libata.h
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/libata.h Sat Oct 25 11:45:10 2003
@@ -0,0 +1,94 @@
+/*
+ libata.h - helper library for ATA
+
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#ifndef __LIBATA_H__
+#define __LIBATA_H__
+
+#define DRV_NAME "libata"
+#define DRV_VERSION "0.75" /* must be exactly four chars */
+
+struct ata_scsi_args {
+ struct ata_port *ap;
+ struct ata_device *dev;
+ Scsi_Cmnd *cmd;
+ void (*done)(Scsi_Cmnd *);
+};
+
+
+/* libata-core.c */
+extern unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s,
+ unsigned int ofs, unsigned int len);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+ struct ata_device *dev);
+extern int ata_qc_issue(struct ata_queued_cmd *qc);
+extern void ata_dev_select(struct ata_port *ap, unsigned int device,
+ unsigned int wait, unsigned int can_sleep);
+extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_thread_wake(struct ata_port *ap, unsigned int thr_state);
+
+
+/* libata-scsi.c */
+extern void ata_to_sense_error(struct ata_queued_cmd *qc);
+extern void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev,
+ Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *),
+ unsigned int cmd_size);
+extern int ata_scsi_error(struct Scsi_Host *host);
+extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+
+extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+
+extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
+ unsigned int buflen);
+extern void ata_scsi_badcmd(Scsi_Cmnd *cmd,
+ void (*done)(Scsi_Cmnd *),
+ u8 asc, u8 ascq);
+extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+ unsigned int (*actor) (struct ata_scsi_args *args,
+ u8 *rbuf, unsigned int buflen));
+
+static inline void ata_bad_scsiop(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+ ata_scsi_badcmd(cmd, done, 0x20, 0x00); /* asc 0x20: INVALID COMMAND OPERATION CODE */
+}
+
+static inline void ata_bad_cdb(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+ ata_scsi_badcmd(cmd, done, 0x24, 0x00); /* asc 0x24: INVALID FIELD IN CDB */
+}
+
+#endif /* __LIBATA_H__ */
diff -Nru a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/sata_promise.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,766 @@
+/*
+ * sata_promise.c - Promise SATA
+ *
+ * Copyright 2003 Red Hat, Inc.
+ *
+ * The contents of this file are subject to the Open
+ * Software License version 1.1 that can be found at
+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ * by reference.
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of the GNU General Public License version 2 (the "GPL") as distributed
+ * in the kernel source COPYING file, in which case the provisions of
+ * the GPL are applicable instead of the above. If you wish to allow
+ * the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under
+ * the OSL, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under either the OSL or the GPL.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_promise"
+#define DRV_VERSION "0.83"
+
+
+enum {
+ PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
+
+ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
+ PDC_TBG_MODE = 0x41, /* TBG mode */
+ PDC_FLASH_CTL = 0x44, /* Flash control register */
+ PDC_CTLSTAT = 0x60, /* IDE control and status register */
+ PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
+ PDC_SLEW_CTL = 0x470, /* slew rate control reg */
+ PDC_20621_SEQCTL = 0x400,
+ PDC_20621_SEQMASK = 0x480,
+
+ PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
+
+ board_2037x = 0, /* FastTrak S150 TX2plus */
+ board_20319 = 1, /* FastTrak S150 TX4 */
+ board_20621 = 2, /* FastTrak S150 SX4 */
+
+ PDC_FLAG_20621 = (1 << 30), /* we have a 20621 */
+};
+
+
+static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
+static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio);
+static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma);
+static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static void pdc_dma_start(struct ata_queued_cmd *qc);
+static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+static void pdc_eng_timeout(struct ata_port *ap);
+static void pdc_20621_phy_reset (struct ata_port *ap);
+
+
+static Scsi_Host_Template pdc_sata_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = ATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+};
+
+static struct ata_port_operations pdc_sata_ops = {
+ .port_disable = ata_port_disable,
+ .set_piomode = pdc_sata_set_piomode,
+ .set_udmamode = pdc_sata_set_udmamode,
+ .tf_load = ata_tf_load_mmio,
+ .tf_read = ata_tf_read_mmio,
+ .check_status = ata_check_status_mmio,
+ .exec_command = ata_exec_command_mmio,
+ .phy_reset = sata_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+ .bmdma_start = pdc_dma_start,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = pdc_eng_timeout,
+ .irq_handler = pdc_interrupt,
+ .scr_read = pdc_sata_scr_read,
+ .scr_write = pdc_sata_scr_write,
+};
+
+static struct ata_port_operations pdc_20621_ops = {
+ .port_disable = ata_port_disable,
+ .set_piomode = pdc_sata_set_piomode,
+ .set_udmamode = pdc_sata_set_udmamode,
+ .tf_load = ata_tf_load_mmio,
+ .tf_read = ata_tf_read_mmio,
+ .check_status = ata_check_status_mmio,
+ .exec_command = ata_exec_command_mmio,
+ .phy_reset = pdc_20621_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+ .bmdma_start = pdc_dma_start,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = pdc_eng_timeout,
+ .irq_handler = pdc_interrupt,
+};
+
+static struct ata_port_info pdc_port_info[] = {
+ /* board_2037x */
+ {
+ .sht = &pdc_sata_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &pdc_sata_ops,
+ },
+
+ /* board_20319 */
+ {
+ .sht = &pdc_sata_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &pdc_sata_ops,
+ },
+
+ /* board_20621 */
+ {
+ .sht = &pdc_sata_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO |
+ PDC_FLAG_20621,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &pdc_20621_ops,
+ },
+
+};
+
+static struct pci_device_id pdc_sata_pci_tbl[] = {
+ { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_2037x },
+ { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_2037x },
+ { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20319 },
+ { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20319 },
+#if 0 /* broken currently */
+ { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20621 },
+#endif
+ { } /* terminate list */
+};
+
+
+static struct pci_driver pdc_sata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pdc_sata_pci_tbl,
+ .probe = pdc_sata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+
+static void pdc_20621_phy_reset (struct ata_port *ap)
+{
+ VPRINTK("ENTER\n");
+ ap->cbl = ATA_CBL_SATA;
+ ata_port_probe(ap);
+ ata_bus_reset(ap);
+}
+
+static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+{
+ if (sc_reg > SCR_CONTROL)
+ return 0xffffffffU; /* invalid SCR register index */
+ return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); /* 32-bit SCR regs, 4 bytes apart */
+}
+
+
+static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
+ u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return; /* silently ignore writes to invalid SCR registers */
+ writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); /* 32-bit SCR regs, 4 bytes apart */
+}
+
+static void pdc_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio)
+{
+ /* dummy */
+}
+
+
+static void pdc_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma)
+{
+ /* dummy */
+}
+
+enum pdc_packet_bits {
+ PDC_PKT_READ = (1 << 2),
+ PDC_PKT_NODATA = (1 << 3),
+
+ PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
+ PDC_PKT_CLEAR_BSY = (1 << 4),
+ PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
+ PDC_LAST_REG = (1 << 3),
+
+ PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
+};
+
+static inline void pdc_pkt_header(struct ata_taskfile *tf, dma_addr_t sg_table,
+ unsigned int devno, u8 *buf)
+{
+ u8 dev_reg;
+ u32 *buf32 = (u32 *) buf;
+
+ /* set control bits (byte 0), zero delay seq id (byte 3),
+ * and seq id (byte 2)
+ */
+ switch (tf->protocol) {
+ case ATA_PROT_DMA_READ:
+ buf32[0] = cpu_to_le32(PDC_PKT_READ);
+ break;
+
+ case ATA_PROT_DMA_WRITE:
+ buf32[0] = 0;
+ break;
+
+ case ATA_PROT_NODATA:
+ buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
+ buf32[2] = 0; /* no next-packet */
+
+ if (devno == 0)
+ dev_reg = ATA_DEVICE_OBS;
+ else
+ dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+ /* select device */
+ buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+ buf[13] = dev_reg;
+
+ /* device control register */
+ buf[14] = (1 << 5) | PDC_REG_DEVCTL;
+ buf[15] = tf->ctl;
+}
+
+static inline void pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
+ unsigned int i)
+{
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ buf[i++] = (1 << 5) | ATA_REG_DEVICE;
+ buf[i++] = tf->device;
+ }
+
+ /* and finally the command itself; also includes end-of-pkt marker */
+ buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
+ buf[i++] = tf->command;
+}
+
+static void pdc_prep_lba28(struct ata_taskfile *tf, dma_addr_t sg_table,
+ unsigned int devno, u8 *buf)
+{
+ unsigned int i;
+
+ pdc_pkt_header(tf, sg_table, devno, buf);
+
+ /* the "(1 << 5)" should be read "(count << 5)" */
+
+ i = 16;
+
+ /* ATA command block registers */
+ buf[i++] = (1 << 5) | ATA_REG_FEATURE;
+ buf[i++] = tf->feature;
+
+ buf[i++] = (1 << 5) | ATA_REG_NSECT;
+ buf[i++] = tf->nsect;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAL;
+ buf[i++] = tf->lbal;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAM;
+ buf[i++] = tf->lbam;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAH;
+ buf[i++] = tf->lbah;
+
+ pdc_pkt_footer(tf, buf, i);
+}
+
+static void pdc_prep_lba48(struct ata_taskfile *tf, dma_addr_t sg_table,
+ unsigned int devno, u8 *buf)
+{
+ unsigned int i;
+
+ pdc_pkt_header(tf, sg_table, devno, buf);
+
+ /* the "(2 << 5)" should be read "(count << 5)" */
+
+ i = 16;
+
+ /* ATA command block registers */
+ buf[i++] = (2 << 5) | ATA_REG_FEATURE;
+ buf[i++] = tf->hob_feature;
+ buf[i++] = tf->feature;
+
+ buf[i++] = (2 << 5) | ATA_REG_NSECT;
+ buf[i++] = tf->hob_nsect;
+ buf[i++] = tf->nsect;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAL;
+ buf[i++] = tf->hob_lbal;
+ buf[i++] = tf->lbal;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAM;
+ buf[i++] = tf->hob_lbam;
+ buf[i++] = tf->lbam;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAH;
+ buf[i++] = tf->hob_lbah;
+ buf[i++] = tf->lbah;
+
+ pdc_pkt_footer(tf, buf, i);
+}
+
+static inline void __pdc_dma_complete (struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+ u32 val;
+
+ /* clear DMA start/stop bit (bit 7) */
+ val = readl(dmactl);
+ writel(val & ~(1 << 7), dmactl);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_altstatus(ap); /* dummy read */
+}
+
+static inline void pdc_dma_complete (struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ __pdc_dma_complete(ap, qc);
+
+ /* get drive status; clear intr; complete txn */
+ ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
+ ata_wait_idle(ap), 0);
+}
+
+static void pdc_eng_timeout(struct ata_port *ap)
+{
+ u8 drv_stat;
+ struct ata_queued_cmd *qc;
+
+ DPRINTK("ENTER\n");
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+ ap->id);
+ goto out;
+ }
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA_READ:
+ case ATA_PROT_DMA_WRITE:
+ printk(KERN_ERR "ata%u: DMA timeout\n", ap->id);
+ __pdc_dma_complete(ap, qc);
+ ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag),
+ ata_wait_idle(ap) | ATA_ERR, 0);
+ break;
+
+ case ATA_PROT_NODATA:
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat);
+
+ ata_qc_complete(qc, drv_stat, 1);
+ break;
+
+ default:
+ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat);
+
+ ata_qc_complete(qc, drv_stat, 1);
+ break;
+ }
+
+out:
+ DPRINTK("EXIT\n");
+}
+
+static inline unsigned int pdc_host_intr( struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ u8 status;
+ unsigned int handled = 0;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA_READ:
+ case ATA_PROT_DMA_WRITE:
+ pdc_dma_complete(ap, qc);
+ handled = 1;
+ break;
+
+ case ATA_PROT_NODATA: /* command completion, but no data xfer */
+ status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+ DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ ata_qc_complete(qc, status, 0);
+ handled = 1;
+ break;
+
+ default:
+ ap->stats.idle_irq++;
+ break;
+ }
+
+ return handled;
+}
+
+static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ata_host_set *host_set = dev_instance;
+ struct ata_port *ap;
+ u32 mask = 0;
+ unsigned int i, tmp;
+ unsigned int handled = 0, have_20621 = 0;
+ void *mmio_base;
+
+ VPRINTK("ENTER\n");
+
+ if (!host_set || !host_set->mmio_base) {
+ VPRINTK("QUICK EXIT\n");
+ return IRQ_NONE;
+ }
+
+ mmio_base = host_set->mmio_base;
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ ap = host_set->ports[i];
+ if (ap && (ap->flags & PDC_FLAG_20621)) {
+ have_20621 = 1;
+ break;
+ }
+ }
+
+ /* reading should also clear interrupts */
+ if (have_20621) {
+ mmio_base += PDC_CHIP0_OFS;
+ mask = readl(mmio_base + PDC_20621_SEQMASK);
+ } else {
+ mask = readl(mmio_base + PDC_INT_SEQMASK);
+ }
+
+ if (mask == 0xffffffff) {
+ VPRINTK("QUICK EXIT 2\n");
+ return IRQ_NONE;
+ }
+ mask &= 0xf; /* only 16 tags possible -- NOTE(review): 0xf keeps just 4 bits; 16 tags would need 0xffff, verify */
+ if (!mask) {
+ VPRINTK("QUICK EXIT 3\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock_irq(&host_set->lock);
+
+ for (i = 0; i < host_set->n_ports; i++) {
+ VPRINTK("port %u\n", i);
+ ap = host_set->ports[i];
+ tmp = mask & (1 << (i + 1));
+ if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0))
+ handled += pdc_host_intr(ap, qc);
+ }
+ }
+
+ spin_unlock_irq(&host_set->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pdc_dma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_host_set *host_set = ap->host_set;
+ unsigned int port_no = ap->port_no;
+ void *mmio = host_set->mmio_base;
+ void *dmactl = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+ unsigned int rw = (qc->flags & ATA_QCFLAG_WRITE);
+ u32 val;
+ u8 seq = (u8) (port_no + 1);
+
+ wmb(); /* flush writes made to PRD table in DMA memory */
+
+ if (ap->flags & PDC_FLAG_20621)
+ mmio += PDC_CHIP0_OFS;
+
+ VPRINTK("ENTER, ap %p, mmio %p\n", ap, mmio);
+
+ /* indicate where our S/G table is to chip */
+ writel(ap->prd_dma, (void *) ap->ioaddr.cmd_addr + PDC_PRD_TBL);
+
+ /* clear dma start bit (paranoia), clear intr seq id (paranoia),
+ * set DMA direction (bit 6 == from chip -> drive)
+ */
+ val = readl(dmactl);
+ VPRINTK("val == %x\n", val);
+ val &= ~(1 << 7); /* clear dma start/stop bit */
+ if (rw) /* set/clear dma direction bit */
+ val |= (1 << 6);
+ else
+ val &= ~(1 << 6);
+ if (qc->tf.ctl & ATA_NIEN) /* set/clear irq-mask bit */
+ val |= (1 << 10);
+ else
+ val &= ~(1 << 10);
+ writel(val, dmactl);
+ val = readl(dmactl);
+ VPRINTK("val == %x\n", val);
+
+ /* FIXME: clear any intr status bits here? */
+
+ ata_exec_command_mmio(ap, &qc->tf);
+
+ VPRINTK("FIVE\n");
+ if (ap->flags & PDC_FLAG_20621)
+ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+ else
+ writel(0x00000001, mmio + (seq * 4));
+
+ /* start host DMA transaction */
+ writel(val | seq | (1 << 7), dmactl);
+}
+
+static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
+{
+ port->cmd_addr = base;
+ port->data_addr = base;
+ port->error_addr = base + 0x4;
+ port->nsect_addr = base + 0x8;
+ port->lbal_addr = base + 0xc;
+ port->lbam_addr = base + 0x10;
+ port->lbah_addr = base + 0x14;
+ port->device_addr = base + 0x18;
+ port->cmdstat_addr = base + 0x1c;
+ port->ctl_addr = base + 0x38;
+}
+
+static void pdc_20621_init(struct ata_probe_ent *pe)
+{
+}
+
+static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
+{
+ void *mmio = pe->mmio_base;
+ u32 tmp;
+
+ if (chip_id == board_20621)
+ return;
+
+ /* change FIFO_SHD to 8 dwords. Promise driver does this...
+ * dunno why.
+ */
+ tmp = readl(mmio + PDC_FLASH_CTL);
+ if ((tmp & (1 << 16)) == 0)
+ writel(tmp | (1 << 16), mmio + PDC_FLASH_CTL);
+
+ /* clear plug/unplug flags for all ports */
+ tmp = readl(mmio + PDC_SATA_PLUG_CSR);
+ writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR);
+
+ /* mask plug/unplug ints */
+ tmp = readl(mmio + PDC_SATA_PLUG_CSR);
+ writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR);
+
+ /* reduce TBG clock to 133 Mhz. FIXME: why? */
+ tmp = readl(mmio + PDC_TBG_MODE);
+ tmp &= ~0x30000; /* clear bit 17, 16*/
+ tmp |= 0x10000; /* set bit 17:16 = 0:1 */
+ writel(tmp, mmio + PDC_TBG_MODE);
+
+ /* adjust slew rate control register. FIXME: why? */
+ tmp = readl(mmio + PDC_SLEW_CTL);
+ tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
+ tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
+ writel(tmp, mmio + PDC_SLEW_CTL);
+}
+
+static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_probe_ent *probe_ent = NULL;
+ unsigned long base;
+ void *mmio_base;
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ int rc;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ /*
+ * NOTE(review): the Apple K2 / Serverworks remark that was here
+ * looks copy-pasted from another driver; not relevant to Promise.
+ */
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ goto err_out_regions;
+
+ probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (probe_ent == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ memset(probe_ent, 0, sizeof(*probe_ent));
+ probe_ent->pdev = pdev;
+ INIT_LIST_HEAD(&probe_ent->node);
+
+ mmio_base = ioremap(pci_resource_start(pdev, 3),
+ pci_resource_len(pdev, 3));
+ if (mmio_base == NULL) {
+ rc = -ENOMEM;
+ goto err_out_free_ent;
+ }
+ base = (unsigned long) mmio_base;
+
+ probe_ent->sht = pdc_port_info[board_idx].sht;
+ probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
+ probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
+ probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
+ probe_ent->port_ops = pdc_port_info[board_idx].port_ops;
+
+ probe_ent->irq = pdev->irq;
+ probe_ent->irq_flags = SA_SHIRQ;
+ probe_ent->mmio_base = mmio_base;
+
+ if (board_idx == board_20621)
+ base += PDC_CHIP0_OFS;
+
+ pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
+ probe_ent->port[0].scr_addr = base + 0x400;
+
+ pdc_sata_setup_port(&probe_ent->port[1], base + 0x280);
+ probe_ent->port[1].scr_addr = base + 0x500;
+
+ /* notice 4-port boards */
+ switch (board_idx) {
+ case board_20319:
+ case board_20621:
+ probe_ent->n_ports = 4;
+
+ pdc_sata_setup_port(&probe_ent->port[2], base + 0x300);
+ probe_ent->port[2].scr_addr = base + 0x600;
+
+ pdc_sata_setup_port(&probe_ent->port[3], base + 0x380);
+ probe_ent->port[3].scr_addr = base + 0x700;
+ break;
+ case board_2037x:
+ probe_ent->n_ports = 2;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ pci_set_master(pdev);
+
+ /* initialize adapter */
+ switch (board_idx) {
+ case board_20621:
+ pdc_20621_init(probe_ent);
+ break;
+
+ default:
+ pdc_host_init(board_idx, probe_ent);
+ break;
+ }
+
+ /* FIXME: check ata_device_add return value */
+ ata_device_add(probe_ent);
+ kfree(probe_ent);
+
+ return 0;
+
+err_out_free_ent:
+ kfree(probe_ent);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+
+static int __init pdc_sata_init(void)
+{
+ int rc;
+
+ rc = pci_module_init(&pdc_sata_pci_driver);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+
+static void __exit pdc_sata_exit(void)
+{
+ pci_unregister_driver(&pdc_sata_pci_driver);
+}
+
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Promise SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
+
+module_init(pdc_sata_init);
+module_exit(pdc_sata_exit);
diff -Nru a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/sata_sil.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,331 @@
+/*
+ *  sata_sil.c - Silicon Image SATA
+ *
+ * Copyright 2003 Red Hat, Inc.
+ * Copyright 2003 Benjamin Herrenschmidt
+ *
+ * The contents of this file are subject to the Open
+ * Software License version 1.1 that can be found at
+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ * by reference.
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of the GNU General Public License version 2 (the "GPL") as distributed
+ * in the kernel source COPYING file, in which case the provisions of
+ * the GPL are applicable instead of the above. If you wish to allow
+ * the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under
+ * the OSL, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under either the OSL or the GPL.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+
+#define DRV_NAME "ata_sil"
+#define DRV_VERSION "0.51"
+
+enum {
+ sil_3112 = 0,
+
+ SIL_IDE0_TF = 0x80,
+ SIL_IDE0_CTL = 0x8A,
+ SIL_IDE0_BMDMA = 0x00,
+ SIL_IDE0_SCR = 0x100,
+
+ SIL_IDE1_TF = 0xC0,
+ SIL_IDE1_CTL = 0xCA,
+ SIL_IDE1_BMDMA = 0x08,
+ SIL_IDE1_SCR = 0x180,
+};
+
+static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio);
+static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma);
+static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
+static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
+static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+
+static struct pci_device_id sil_pci_tbl[] = {
+ { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { } /* terminate list */
+};
+
+static struct pci_driver sil_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sil_pci_tbl,
+ .probe = sil_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static Scsi_Host_Template sil_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = ATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+};
+
+static struct ata_port_operations sil_ops = {
+ .port_disable = ata_port_disable,
+ .dev_config = sil_dev_config,
+ .set_piomode = sil_set_piomode,
+ .set_udmamode = sil_set_udmamode,
+ .tf_load = ata_tf_load_mmio,
+ .tf_read = ata_tf_read_mmio,
+ .check_status = ata_check_status_mmio,
+ .exec_command = ata_exec_command_mmio,
+ .phy_reset = sata_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+ .bmdma_start = ata_bmdma_start_mmio,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = ata_eng_timeout,
+ .irq_handler = ata_interrupt,
+ .scr_read = sil_scr_read,
+ .scr_write = sil_scr_write,
+};
+
+static struct ata_port_info sil_port_info[] = {
+ /* sil_3112 */
+ {
+ .sht = &sil_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6; FIXME */
+ .port_ops = &sil_ops,
+ },
+};
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
+
+static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
+{
+ unsigned long offset = ap->ioaddr.scr_addr;
+
+ switch (sc_reg) {
+ case SCR_STATUS:
+ return offset + 4;
+ case SCR_ERROR:
+ return offset + 8;
+ case SCR_CONTROL:
+ return offset;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
+{
+ void *mmio = (void *) sil_scr_addr(ap, sc_reg);
+ if (mmio)
+ return readl(mmio);
+ return 0xffffffffU;
+}
+
+static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+{
+ void *mmio = (void *) sil_scr_addr(ap, sc_reg);
+ if (mmio)
+ writel(val, mmio);
+}
+
+/**
+ * sil_dev_config - Apply device/host-specific errata fixups
+ * @ap: Port containing device to be examined
+ * @dev: Device to be examined
+ *
+ * After the IDENTIFY [PACKET] DEVICE step is complete, and a
+ * device is known to be present, this function is called.
+ * We apply two errata fixups which are specific to Silicon Image,
+ * a Seagate and a Maxtor fixup.
+ *
+ * For certain Seagate devices, we must limit the maximum sectors
+ * to under 8K.
+ *
+ * For certain Maxtor devices, we must not program the drive
+ * beyond udma5.
+ *
+ * Both fixups are unfairly pessimistic. As soon as I get more
+ * information on these errata, I will create a more exhaustive
+ * list, and apply the fixups to only the specific
+ * devices/hosts/firmwares that need it.
+ */
+static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
+{
+ const char *s = &dev->product[0];
+ unsigned int len = strnlen(s, sizeof(dev->product));
+
+ /* ATAPI specifies that empty space is blank-filled; remove blanks */
+ while ((len > 0) && (s[len - 1] == ' '))
+ len--;
+
+ /* limit to udma5 */
+ if (!memcmp(s, "Maxtor ", 7)) {
+ printk(KERN_INFO "ata%u(%u): applying pessimistic Maxtor errata fix\n",
+ ap->id, dev->devno);
+ ap->udma_mask &= ATA_UDMA5;
+ return;
+ }
+
+ /* limit requests to 15 sectors */
+ if ((len > 4) && (!memcmp(s, "ST", 2))) {
+ if ((!memcmp(s + len - 2, "AS", 2)) ||
+ (!memcmp(s + len - 3, "ASL", 3))) {
+ printk(KERN_INFO "ata%u(%u): applying pessimistic Seagate errata fix\n",
+ ap->id, dev->devno);
+ ap->host->max_sectors = 15;
+ ap->host->hostt->max_sectors = 15;
+ return;
+ }
+ }
+}
+
+static void sil_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio)
+{
+ /* We need empty implementation, the core doesn't test for NULL
+ * function pointer
+ */
+}
+
+static void sil_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma)
+{
+ /* We need empty implementation, the core doesn't test for NULL
+ * function pointer
+ */
+}
+
+static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_probe_ent *probe_ent = NULL;
+ unsigned long base;
+ void *mmio_base;
+ int rc;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+	/*
+	 * Enable the PCI device and claim its BARs before touching
+	 * any controller registers.
+	 */
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ goto err_out_regions;
+
+ probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (probe_ent == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ memset(probe_ent, 0, sizeof(*probe_ent));
+ INIT_LIST_HEAD(&probe_ent->node);
+ probe_ent->pdev = pdev;
+ probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops;
+ probe_ent->sht = sil_port_info[ent->driver_data].sht;
+ probe_ent->n_ports = 2;
+ probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask;
+ probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
+ probe_ent->irq = pdev->irq;
+ probe_ent->irq_flags = SA_SHIRQ;
+ probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
+
+ mmio_base = ioremap(pci_resource_start(pdev, 5),
+ pci_resource_len(pdev, 5));
+ if (mmio_base == NULL) {
+ rc = -ENOMEM;
+ goto err_out_free_ent;
+ }
+
+ probe_ent->mmio_base = mmio_base;
+
+ base = (unsigned long) mmio_base;
+ probe_ent->port[0].cmd_addr = base + SIL_IDE0_TF;
+ probe_ent->port[0].ctl_addr = base + SIL_IDE0_CTL;
+ probe_ent->port[0].bmdma_addr = base + SIL_IDE0_BMDMA;
+ probe_ent->port[0].scr_addr = base + SIL_IDE0_SCR;
+ ata_std_ports(&probe_ent->port[0]);
+
+ probe_ent->port[1].cmd_addr = base + SIL_IDE1_TF;
+ probe_ent->port[1].ctl_addr = base + SIL_IDE1_CTL;
+ probe_ent->port[1].bmdma_addr = base + SIL_IDE1_BMDMA;
+ probe_ent->port[1].scr_addr = base + SIL_IDE1_SCR;
+ ata_std_ports(&probe_ent->port[1]);
+
+ pci_set_master(pdev);
+
+ /* FIXME: check ata_device_add return value */
+ ata_device_add(probe_ent);
+ kfree(probe_ent);
+
+ return 0;
+
+err_out_free_ent:
+ kfree(probe_ent);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static int __init sil_init(void)
+{
+ int rc;
+
+ rc = pci_module_init(&sil_pci_driver);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void __exit sil_exit(void)
+{
+ pci_unregister_driver(&sil_pci_driver);
+}
+
+
+module_init(sil_init);
+module_exit(sil_exit);
diff -Nru a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/sata_svw.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,389 @@
+/*
+ *  sata_svw.c - Broadcom (Apple K2) SATA
+ *
+ * Copyright 2003 Benjamin Herrenschmidt
+ *
+ * Bits from Jeff Garzik, Copyright RedHat, Inc.
+ *
+ * This driver probably works with non-Apple versions of the
+ * Broadcom chipset...
+ *
+ * The contents of this file are subject to the Open
+ * Software License version 1.1 that can be found at
+ * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ * by reference.
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of the GNU General Public License version 2 (the "GPL") as distributed
+ * in the kernel source COPYING file, in which case the provisions of
+ * the GPL are applicable instead of the above. If you wish to allow
+ * the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under
+ * the OSL, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under either the OSL or the GPL.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+
+#ifdef CONFIG_ALL_PPC
+#include
+#include
+#endif /* CONFIG_ALL_PPC */
+
+#define DRV_NAME "ata_k2"
+#define DRV_VERSION "1.02"
+
+
+static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+{
+ if (sc_reg > SCR_CONTROL)
+ return 0xffffffffU;
+ return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
+
+static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
+ u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return;
+ writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+}
+
+
+static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ writeb(tf->ctl, ioaddr->ctl_addr);
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->error_addr);
+ writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr);
+ writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr);
+ writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr);
+ writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr);
+ } else if (is_addr) {
+ writew(tf->feature, ioaddr->error_addr);
+ writew(tf->nsect, ioaddr->nsect_addr);
+ writew(tf->lbal, ioaddr->lbal_addr);
+ writew(tf->lbam, ioaddr->lbam_addr);
+ writew(tf->lbah, ioaddr->lbah_addr);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ writeb(tf->device, ioaddr->device_addr);
+
+ ata_wait_idle(ap);
+}
+
+
+static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u16 nsect, lbal, lbam, lbah;
+
+ nsect = tf->nsect = readw(ioaddr->nsect_addr);
+ lbal = tf->lbal = readw(ioaddr->lbal_addr);
+ lbam = tf->lbam = readw(ioaddr->lbam_addr);
+ lbah = tf->lbah = readw(ioaddr->lbah_addr);
+ tf->device = readw(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ tf->hob_feature = readw(ioaddr->error_addr) >> 8;
+ tf->hob_nsect = nsect >> 8;
+ tf->hob_lbal = lbal >> 8;
+ tf->hob_lbam = lbam >> 8;
+ tf->hob_lbah = lbah >> 8;
+ }
+}
+
+
+static u8 k2_stat_check_status(struct ata_port *ap)
+{
+ return readl((void *) ap->ioaddr.cmdstat_addr);
+}
+
+static void k2_sata_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio)
+{
+ /* We need empty implementation, the core doesn't test for NULL
+ * function pointer
+ */
+}
+
+
+static void k2_sata_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma)
+{
+ /* We need empty implementation, the core doesn't test for NULL
+ * function pointer
+ */
+}
+
+
+#ifdef CONFIG_ALL_PPC
+/*
+ * k2_sata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+static int k2_sata_proc_info(char *page, char **start, off_t offset, int count,
+ int hostno, int inout)
+{
+ struct Scsi_Host *hpnt;
+ struct ata_port *ap;
+ struct device_node *np;
+ int len, index;
+
+	/* Find ourselves.  This is racy and inelegant, but given the
+	 * /proc/scsi interface and the lack of state kept around in this
+	 * driver, it is the best we can do for now...
+ */
+ hpnt = scsi_hostlist;
+ while (hpnt) {
+ if (hostno == hpnt->host_no)
+ break;
+ hpnt = hpnt->next;
+ }
+ if (!hpnt)
+ return 0;
+
+ /* Find the ata_port */
+ ap = (struct ata_port *) &hpnt->hostdata[0];
+ if (ap == NULL)
+ return 0;
+
+ /* Find the OF node for the PCI device proper */
+ np = pci_device_to_OF_node(ap->host_set->pdev);
+ if (np == NULL)
+ return 0;
+
+ /* Match it to a port node */
+ index = (ap == ap->host_set->ports[0]) ? 0 : 1;
+ for (np = np->child; np != NULL; np = np->sibling) {
+ u32 *reg = (u32 *)get_property(np, "reg", NULL);
+ if (!reg)
+ continue;
+ if (index == *reg)
+ break;
+ }
+ if (np == NULL)
+ return 0;
+
+ len = sprintf(page, "devspec: %s\n", np->full_name);
+
+ return len;
+}
+#endif /* CONFIG_ALL_PPC */
+
+
+static Scsi_Host_Template k2_sata_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = ATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+#ifdef CONFIG_ALL_PPC
+ .proc_info = k2_sata_proc_info
+#endif
+};
+
+
+static struct ata_port_operations k2_sata_ops = {
+ .port_disable = ata_port_disable,
+ .set_piomode = k2_sata_set_piomode,
+ .set_udmamode = k2_sata_set_udmamode,
+ .tf_load = k2_sata_tf_load,
+ .tf_read = k2_sata_tf_read,
+ .check_status = k2_stat_check_status,
+ .exec_command = ata_exec_command_mmio,
+ .phy_reset = sata_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+ .bmdma_start = ata_bmdma_start_mmio,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = ata_eng_timeout,
+ .irq_handler = ata_interrupt,
+ .scr_read = k2_sata_scr_read,
+ .scr_write = k2_sata_scr_write,
+};
+
+
+static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
+{
+ port->cmd_addr = base;
+ port->data_addr = base;
+ port->error_addr = base + 0x4;
+ port->nsect_addr = base + 0x8;
+ port->lbal_addr = base + 0xc;
+ port->lbam_addr = base + 0x10;
+ port->lbah_addr = base + 0x14;
+ port->device_addr = base + 0x18;
+ port->cmdstat_addr = base + 0x1c;
+ port->ctl_addr = base + 0x20;
+ port->bmdma_addr = base + 0x30;
+ port->scr_addr = base + 0x40;
+}
+
+
+static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_probe_ent *probe_ent = NULL;
+ unsigned long base;
+ void *mmio_base;
+ int rc;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ /*
+ * If this driver happens to only be useful on Apple's K2, then
+ * we should check that here as it has a normal Serverworks ID
+ */
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ goto err_out_regions;
+
+ probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (probe_ent == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ memset(probe_ent, 0, sizeof(*probe_ent));
+ probe_ent->pdev = pdev;
+ INIT_LIST_HEAD(&probe_ent->node);
+
+ mmio_base = ioremap(pci_resource_start(pdev, 5),
+ pci_resource_len(pdev, 5));
+ if (mmio_base == NULL) {
+ rc = -ENOMEM;
+ goto err_out_free_ent;
+ }
+ base = (unsigned long) mmio_base;
+
+ /*
+ * Check for the "disabled" second function to avoid registering
+ * useless interfaces on K2
+ */
+ if (readl(mmio_base + 0x40) == 0xffffffffUL &&
+ readl(mmio_base + 0x140) == 0xffffffffUL) {
+ rc = -ENODEV;
+ goto err_out_unmap;
+ }
+ probe_ent->sht = &k2_sata_sht;
+ probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO;
+ probe_ent->port_ops = &k2_sata_ops;
+ probe_ent->n_ports = 2;
+ probe_ent->irq = pdev->irq;
+ probe_ent->irq_flags = SA_SHIRQ;
+ probe_ent->mmio_base = mmio_base;
+
+ /*
+ * We don't care much about the PIO/UDMA masks, but the core won't like us
+ * if we don't fill these
+ */
+ probe_ent->pio_mask = 0x1f;
+ probe_ent->udma_mask = 0x7f;
+
+ k2_sata_setup_port(&probe_ent->port[0], base);
+ k2_sata_setup_port(&probe_ent->port[1], base + 0x100);
+
+ pci_set_master(pdev);
+
+ /* FIXME: check ata_device_add return value */
+ ata_device_add(probe_ent);
+ kfree(probe_ent);
+
+ return 0;
+
+err_out_unmap:
+ iounmap((void *)base);
+err_out_free_ent:
+ kfree(probe_ent);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+static struct pci_device_id k2_sata_pci_tbl[] = {
+ { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { }
+};
+
+
+static struct pci_driver k2_sata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = k2_sata_pci_tbl,
+ .probe = k2_sata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+
+static int __init k2_sata_init(void)
+{
+ int rc;
+
+ rc = pci_module_init(&k2_sata_pci_driver);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+
+static void __exit k2_sata_exit(void)
+{
+ pci_unregister_driver(&k2_sata_pci_driver);
+}
+
+
+MODULE_AUTHOR("Benjamin Herrenschmidt");
+MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
+
+module_init(k2_sata_init);
+module_exit(k2_sata_exit);
diff -Nru a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/drivers/scsi/sata_via.c Sat Oct 25 11:45:10 2003
@@ -0,0 +1,264 @@
+/*
+ sata_via.c - VIA Serial ATA controllers
+
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "scsi.h"
+#include "hosts.h"
+#include
+
+#define DRV_NAME "sata_via"
+#define DRV_VERSION "0.11"
+
+enum {
+ via_sata = 0,
+};
+
+static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static void svia_sata_phy_reset(struct ata_port *ap);
+static void svia_port_disable(struct ata_port *ap);
+static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio);
+static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma);
+
+static unsigned int in_module_init = 1;
+
+static struct pci_device_id svia_pci_tbl[] = {
+ { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, via_sata },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver svia_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = svia_pci_tbl,
+ .probe = svia_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static Scsi_Host_Template svia_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = ATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+};
+
+static struct ata_port_operations svia_sata_ops = {
+ .port_disable = svia_port_disable,
+ .set_piomode = svia_set_piomode,
+ .set_udmamode = svia_set_udmamode,
+
+ .tf_load = ata_tf_load_pio,
+ .tf_read = ata_tf_read_pio,
+ .check_status = ata_check_status_pio,
+ .exec_command = ata_exec_command_pio,
+
+ .phy_reset = svia_sata_phy_reset,
+ .phy_config = pata_phy_config, /* not a typo */
+
+ .bmdma_start = ata_bmdma_start_pio,
+ .fill_sg = ata_fill_sg,
+ .eng_timeout = ata_eng_timeout,
+
+ .irq_handler = ata_interrupt,
+};
+
+static struct ata_port_info svia_port_info[] = {
+ /* via_sata */
+ {
+ .sht = &svia_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY
+ | ATA_FLAG_SRST,
+ .pio_mask = 0x03, /* pio3-4 */
+ .udma_mask = 0x7f, /* udma0-6 ; FIXME */
+ .port_ops = &svia_sata_ops,
+ },
+};
+
+static struct pci_bits svia_enable_bits[] = {
+ { 0x40U, 1U, 0x02UL, 0x02UL }, /* port 0 */
+ { 0x40U, 1U, 0x01UL, 0x01UL }, /* port 1 */
+};
+
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
+
+/**
+ *	svia_sata_phy_reset - probe, then bus-reset, one VIA SATA port
+ *	@ap: port to reset; disabled instead if its enable bit in PCI
+ *	config register 0x40 is clear (see svia_enable_bits)
+ *
+ *	LOCKING: inherited from the libata caller -- not verified here.
+ */
+
+static void svia_sata_phy_reset(struct ata_port *ap)
+{
+ if (!pci_test_config_bits(ap->host_set->pdev,
+ &svia_enable_bits[ap->port_no])) {
+ ata_port_disable(ap);
+ printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
+ return;
+ }
+
+ ata_port_probe(ap);
+ if (ap->flags & ATA_FLAG_PORT_DISABLED)
+ return;
+
+ ata_bus_reset(ap);
+}
+
+/**
+ * svia_port_disable -
+ * @ap:
+ *
+ * LOCKING:
+ *
+ */
+
+static void svia_port_disable(struct ata_port *ap)
+{
+ ata_port_disable(ap);
+
+ /* FIXME */
+}
+
+/**
+ * svia_set_piomode -
+ * @ap:
+ * @adev:
+ * @pio:
+ *
+ * LOCKING:
+ *
+ */
+
+static void svia_set_piomode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int pio)
+{
+ /* FIXME: needed? */
+}
+
+/**
+ * svia_set_udmamode -
+ * @ap:
+ * @adev:
+ * @udma:
+ *
+ * LOCKING:
+ *
+ */
+
+static void svia_set_udmamode (struct ata_port *ap, struct ata_device *adev,
+ unsigned int udma)
+{
+ /* FIXME: needed? */
+}
+
+/**
+ * svia_init_one -
+ * @pdev:
+ * @ent:
+ *
+ * LOCKING:
+ *
+ * RETURNS:
+ *
+ */
+
+static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_port_info *port_info[1];
+ unsigned int n_ports = 1;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ /* no hotplugging support (FIXME) */
+ if (!in_module_init)
+ return -ENODEV;
+
+ port_info[0] = &svia_port_info[ent->driver_data];
+
+ return ata_pci_init_one(pdev, port_info, n_ports);
+}
+
+/**
+ * svia_init -
+ *
+ * LOCKING:
+ *
+ * RETURNS:
+ *
+ */
+
+static int __init svia_init(void)
+{
+ int rc;
+
+ DPRINTK("pci_module_init\n");
+ rc = pci_module_init(&svia_pci_driver);
+ if (rc)
+ return rc;
+
+ in_module_init = 0;
+
+ DPRINTK("done\n");
+ return 0;
+}
+
+/**
+ * svia_exit -
+ *
+ * LOCKING:
+ *
+ */
+
+static void __exit svia_exit(void)
+{
+ pci_unregister_driver(&svia_pci_driver);
+}
+
+module_init(svia_init);
+module_exit(svia_exit);
+
diff -Nru a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
--- a/drivers/scsi/scsi.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/scsi.c Sat Oct 25 11:45:09 2003
@@ -914,9 +914,7 @@
return;
module_put(sdev->host->hostt->module);
- if (atomic_dec_and_test(&sdev->access_count))
- if (test_bit(SDEV_DEL, &sdev->sdev_state))
- device_del(&sdev->sdev_gendev);
+ atomic_dec(&sdev->access_count);
put_device(&sdev->sdev_gendev);
class_put(&sdev_class);
}
diff -Nru a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
--- a/drivers/scsi/scsi_devinfo.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/scsi_devinfo.c Sat Oct 25 11:45:09 2003
@@ -6,10 +6,11 @@
#include
#include
#include
+#include
#include "scsi.h"
+#include "hosts.h"
#include "scsi_priv.h"
-#include "scsi_devinfo.h"
/*
* scsi_dev_info_list: structure to hold black/white listed devices.
@@ -322,11 +323,17 @@
* Description:
* Search the scsi_dev_info_list for an entry matching @vendor and
* @model, if found, return the matching flags value, else return
- * scsi_default_dev_flags.
+ * the host or global default settings.
**/
-int scsi_get_device_flags(unsigned char *vendor, unsigned char *model)
+int scsi_get_device_flags(struct scsi_device *sdev, unsigned char *vendor,
+ unsigned char *model)
{
struct scsi_dev_info_list *devinfo;
+ unsigned int bflags;
+
+ bflags = sdev->host->hostt->flags;
+ if (!bflags)
+ bflags = scsi_default_dev_flags;
list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) {
if (devinfo->compatible) {
@@ -378,7 +385,7 @@
return devinfo->flags;
}
}
- return scsi_default_dev_flags;
+ return bflags;
}
#ifdef CONFIG_SCSI_PROC_FS
diff -Nru a/drivers/scsi/scsi_devinfo.h b/drivers/scsi/scsi_devinfo.h
--- a/drivers/scsi/scsi_devinfo.h Sat Oct 25 11:45:09 2003
+++ /dev/null Wed Dec 31 16:00:00 1969
@@ -1,17 +0,0 @@
-
-/*
- * Flags for SCSI devices that need special treatment
- */
-#define BLIST_NOLUN 0x001 /* Only scan LUN 0 */
-#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning */
-#define BLIST_BORKEN 0x004 /* Flag for broken handshaking */
-#define BLIST_KEY 0x008 /* unlock by special command */
-#define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */
-#define BLIST_NOTQ 0x020 /* Buggy Tagged Command Queuing */
-#define BLIST_SPARSELUN 0x040 /* Non consecutive LUN numbering */
-#define BLIST_MAX5LUN 0x080 /* Avoid LUNS >= 5 */
-#define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */
-#define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */
-#define BLIST_INQUIRY_36 0x400 /* override additional length field */
-#define BLIST_INQUIRY_58 0x800 /* ... for broken inquiry responses */
-#define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */
diff -Nru a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
--- a/drivers/scsi/scsi_priv.h Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/scsi_priv.h Sat Oct 25 11:45:09 2003
@@ -85,7 +85,8 @@
extern void __scsi_release_request(struct scsi_request *sreq);
/* scsi_devinfo.c */
-extern int scsi_get_device_flags(unsigned char *vendor, unsigned char *model);
+extern int scsi_get_device_flags(struct scsi_device *sdev,
+ unsigned char *vendor, unsigned char *model);
extern int scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
diff -Nru a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
--- a/drivers/scsi/scsi_scan.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/scsi_scan.c Sat Oct 25 11:45:09 2003
@@ -35,10 +35,10 @@
#include "scsi.h"
#include "hosts.h"
#include
+#include
#include "scsi_priv.h"
#include "scsi_logging.h"
-#include "scsi_devinfo.h"
#define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
" SCSI scanning, some SCSI devices might not be configured\n"
@@ -365,7 +365,7 @@
* bit fields in Scsi_Device, so bflags need not be passed as an
* argument.
*/
- *bflags |= scsi_get_device_flags(&inq_result[8], &inq_result[16]);
+ *bflags |= scsi_get_device_flags(sdev, &inq_result[8], &inq_result[16]);
possible_inq_resp_len = (unsigned char) inq_result[4] + 5;
if (BLIST_INQUIRY_36 & *bflags)
@@ -625,7 +625,15 @@
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
sdev->use_10_for_rw = 1;
- sdev->use_10_for_ms = 0;
+
+ if (*bflags & BLIST_MS_SKIP_PAGE_08)
+ sdev->skip_ms_page_8 = 1;
+
+ if (*bflags & BLIST_MS_SKIP_PAGE_3F)
+ sdev->skip_ms_page_3f = 1;
+
+ if (*bflags & BLIST_USE_10_BYTE_MS)
+ sdev->use_10_for_ms = 1;
if(sdev->host->hostt->slave_configure)
sdev->host->hostt->slave_configure(sdev);
@@ -678,7 +686,8 @@
if (sdevp)
*sdevp = sdev;
if (bflagsp)
- *bflagsp = scsi_get_device_flags(sdev->vendor,
+ *bflagsp = scsi_get_device_flags(sdev,
+ sdev->vendor,
sdev->model);
return SCSI_SCAN_LUN_PRESENT;
}
@@ -1080,8 +1089,12 @@
void scsi_rescan_device(struct device *dev)
{
- struct scsi_driver *drv = to_scsi_driver(dev->driver);
+ struct scsi_driver *drv;
+
+ if (!dev->driver)
+ return;
+ drv = to_scsi_driver(dev->driver);
if (try_module_get(drv->owner)) {
if (drv->rescan)
drv->rescan(dev);
diff -Nru a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
--- a/drivers/scsi/scsi_sysfs.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/scsi_sysfs.c Sat Oct 25 11:45:09 2003
@@ -412,8 +412,7 @@
set_bit(SDEV_DEL, &sdev->sdev_state);
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
- if (!atomic_read(&sdev->access_count))
- device_del(&sdev->sdev_gendev);
+ device_del(&sdev->sdev_gendev);
up_write(&class->subsys.rwsem);
}
diff -Nru a/drivers/scsi/sd.c b/drivers/scsi/sd.c
--- a/drivers/scsi/sd.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/sd.c Sat Oct 25 11:45:09 2003
@@ -74,9 +74,16 @@
*/
#define SD_MAX_RETRIES 5
+static void scsi_disk_release (struct kobject *kobj);
+
+static struct kobj_type scsi_disk_kobj_type = {
+ .release = scsi_disk_release,
+};
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
+ struct kobject kobj;
struct gendisk *disk;
unsigned int openers; /* protected by BKL for now, yuck */
sector_t capacity; /* size in 512-byte sectors */
@@ -87,6 +94,7 @@
unsigned RCD : 1; /* state of disk RCD bit, unused */
};
+
static unsigned long sd_index_bits[SD_DISKS / BITS_PER_LONG];
static spinlock_t sd_index_lock = SPIN_LOCK_UNLOCKED;
@@ -128,11 +136,33 @@
}
}
+#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,kobj);
+
static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
{
return container_of(disk->private_data, struct scsi_disk, driver);
}
+static int scsi_disk_get(struct scsi_disk *sdkp)
+{
+ if (!kobject_get(&sdkp->kobj))
+ goto out;
+ if (scsi_device_get(sdkp->device))
+ goto out_put_kobj;
+ return 0;
+
+out_put_kobj:
+ kobject_put(&sdkp->kobj);
+out:
+ return -ENXIO;
+}
+
+static void scsi_disk_put(struct scsi_disk *sdkp)
+{
+ scsi_device_put(sdkp->device);
+ kobject_put(&sdkp->kobj);
+}
+
/**
* sd_init_command - build a scsi (read or write) command from
* information in the request structure.
@@ -352,15 +382,17 @@
{
struct gendisk *disk = inode->i_bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdev = sdkp->device;
+ struct scsi_device *sdev;
int retval;
SCSI_LOG_HLQUEUE(3, printk("sd_open: disk=%s\n", disk->disk_name));
- retval = scsi_device_get(sdev);
+ retval = scsi_disk_get(sdkp);
if (retval)
return retval;
+ sdev = sdkp->device;
+
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
@@ -406,7 +438,7 @@
return 0;
error_out:
- scsi_device_put(sdev);
+ scsi_disk_put(sdkp);
return retval;
}
@@ -438,7 +470,7 @@
* XXX and what if there are packets in flight and this close()
* XXX is followed by a "rmmod sd_mod"?
*/
- scsi_device_put(sdev);
+ scsi_disk_put(sdkp);
return 0;
}
@@ -1057,6 +1089,11 @@
int res;
struct scsi_mode_data data;
+ if (sdkp->device->skip_ms_page_3f) {
+ printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname);
+ return;
+ }
+
/*
* First attempt: ask for all pages (0x3F), but only 4 bytes.
* We have to start carefully: some devices hang if we ask
@@ -1103,6 +1140,8 @@
const int modepage = 0x08; /* current values, cache page */
struct scsi_mode_data data;
+ if (sdkp->device->skip_ms_page_8)
+ goto defaults;
/* cautiously ask */
res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, 4, &data);
@@ -1160,6 +1199,8 @@
printk(KERN_ERR "%s: asking for cache data failed\n",
diskname);
}
+
+defaults:
printk(KERN_ERR "%s: assuming drive cache: write through\n",
diskname);
sdkp->WCE = 0;
@@ -1270,6 +1311,10 @@
if (!sdkp)
goto out;
+ memset (sdkp, 0, sizeof(*sdkp));
+ kobject_init(&sdkp->kobj);
+ sdkp->kobj.ktype = &scsi_disk_kobj_type;
+
gd = alloc_disk(16);
if (!gd)
goto out_free;
@@ -1348,16 +1393,27 @@
struct scsi_disk *sdkp = dev_get_drvdata(dev);
del_gendisk(sdkp->disk);
+ sd_shutdown(dev);
+ kobject_put(&sdkp->kobj);
+
+ return 0;
+}
+
+/**
+ * scsi_disk_release - Called to free the scsi_disk structure
+ * @kobj: pointer to embedded kobject
+ **/
+static void scsi_disk_release(struct kobject *kobj)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(kobj);
+
+ put_disk(sdkp->disk);
spin_lock(&sd_index_lock);
clear_bit(sdkp->index, sd_index_bits);
spin_unlock(&sd_index_lock);
- sd_shutdown(dev);
- put_disk(sdkp->disk);
kfree(sdkp);
-
- return 0;
}
/*
diff -Nru a/drivers/scsi/sg.c b/drivers/scsi/sg.c
--- a/drivers/scsi/sg.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/sg.c Sat Oct 25 11:45:09 2003
@@ -877,6 +877,8 @@
result = get_user(val, (int *) arg);
if (result)
return result;
+ if (val < 0)
+ return -EINVAL;
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
diff -Nru a/drivers/scsi/sr.c b/drivers/scsi/sr.c
--- a/drivers/scsi/sr.c Sat Oct 25 11:45:09 2003
+++ b/drivers/scsi/sr.c Sat Oct 25 11:45:09 2003
@@ -289,12 +289,12 @@
return 0;
memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
- if (rq_data_dir(rq) == WRITE)
+ if (!rq->data_len)
+ SCpnt->sc_data_direction = SCSI_DATA_NONE;
+ else if (rq_data_dir(rq) == WRITE)
SCpnt->sc_data_direction = SCSI_DATA_WRITE;
- else if (rq->data_len)
- SCpnt->sc_data_direction = SCSI_DATA_READ;
else
- SCpnt->sc_data_direction = SCSI_DATA_NONE;
+ SCpnt->sc_data_direction = SCSI_DATA_READ;
this_count = rq->data_len;
if (rq->timeout)
diff -Nru a/drivers/serial/8250.c b/drivers/serial/8250.c
--- a/drivers/serial/8250.c Sat Oct 25 11:45:09 2003
+++ b/drivers/serial/8250.c Sat Oct 25 11:45:09 2003
@@ -2086,6 +2086,9 @@
int __init early_serial_setup(struct uart_port *port)
{
+ if (port->line >= ARRAY_SIZE(serial8250_ports))
+ return -ENODEV;
+
serial8250_isa_init_ports();
serial8250_ports[port->line].port = *port;
serial8250_ports[port->line].port.ops = &serial8250_pops;
diff -Nru a/drivers/usb/class/audio.c b/drivers/usb/class/audio.c
--- a/drivers/usb/class/audio.c Sat Oct 25 11:45:10 2003
+++ b/drivers/usb/class/audio.c Sat Oct 25 11:45:10 2003
@@ -2007,6 +2007,8 @@
if (cmd == SOUND_MIXER_INFO) {
mixer_info info;
+
+ memset(&info, 0, sizeof(info));
strncpy(info.id, "USB_AUDIO", sizeof(info.id));
strncpy(info.name, "USB Audio Class Driver", sizeof(info.name));
info.modify_counter = ms->modcnt;
@@ -2016,6 +2018,8 @@
}
if (cmd == SOUND_OLD_MIXER_INFO) {
_old_mixer_info info;
+
+ memset(&info, 0, sizeof(info));
strncpy(info.id, "USB_AUDIO", sizeof(info.id));
strncpy(info.name, "USB Audio Class Driver", sizeof(info.name));
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
diff -Nru a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
--- a/drivers/usb/class/cdc-acm.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/class/cdc-acm.c Sat Oct 25 11:45:09 2003
@@ -1,5 +1,5 @@
/*
- * acm.c Version 0.21
+ * acm.c Version 0.22
*
* Copyright (c) 1999 Armin Fuerst
* Copyright (c) 1999 Pavel Machek
@@ -24,6 +24,8 @@
* v0.19 - fixed CLOCAL handling (thanks to Richard Shih-Ping Chan)
* v0.20 - switched to probing on interface (rather than device) class
* v0.21 - revert to probing on device for devices with multiple configs
+ * v0.22 - probe only the control interface. if usbcore doesn't choose the
+ * config we want, sysadmin changes bConfigurationValue in sysfs.
*/
/*
@@ -139,7 +141,8 @@
struct acm {
struct usb_device *dev; /* the corresponding usb device */
- struct usb_interface *iface; /* the interfaces - +0 control +1 data */
+ struct usb_interface *control; /* control interface */
+ struct usb_interface *data; /* data interface */
struct tty_struct *tty; /* the corresponding tty */
struct urb *ctrlurb, *readurb, *writeurb; /* urbs */
struct acm_line line; /* line coding (bits, stop, parity) */
@@ -167,12 +170,15 @@
{
int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
request, USB_RT_ACM, value,
- acm->iface[0].altsetting[0].desc.bInterfaceNumber,
+ acm->control->altsetting[0].desc.bInterfaceNumber,
buf, len, HZ * 5);
dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d", request, value, len, retval);
return retval < 0 ? retval : 0;
}
+/* devices aren't required to support these requests.
+ * the cdc acm descriptor tells whether they do...
+ */
#define acm_set_control(acm, control) acm_ctrl_msg(acm, ACM_REQ_SET_CONTROL, control, NULL, 0)
#define acm_set_line(acm, line) acm_ctrl_msg(acm, ACM_REQ_SET_LINE, 0, line, sizeof(struct acm_line))
#define acm_send_break(acm, ms) acm_ctrl_msg(acm, ACM_REQ_SEND_BREAK, ms, NULL, 0)
@@ -211,7 +217,7 @@
case ACM_IRQ_NETWORK:
- dbg("%s network", data[0] ? "connected to" : "disconnected from");
+ dbg("%s network", dr->wValue ? "connected to" : "disconnected from");
break;
case ACM_IRQ_LINE_STATE:
@@ -546,17 +552,15 @@
struct usb_device *dev;
struct acm *acm;
struct usb_host_config *cfacm;
+ struct usb_interface *data;
struct usb_host_interface *ifcom, *ifdata;
struct usb_endpoint_descriptor *epctrl, *epread, *epwrite;
- int readsize, ctrlsize, minor, i, j;
+ int readsize, ctrlsize, minor, j;
unsigned char *buf;
dev = interface_to_usbdev (intf);
- for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
-
- cfacm = dev->config + i;
- dbg("probing config %d", cfacm->desc.bConfigurationValue);
+ cfacm = dev->actconfig;
for (j = 0; j < cfacm->desc.bNumInterfaces - 1; j++) {
@@ -564,19 +568,23 @@
usb_interface_claimed(cfacm->interface[j + 1]))
continue;
- ifcom = cfacm->interface[j]->altsetting + 0;
- ifdata = cfacm->interface[j + 1]->altsetting + 0;
-
- if (ifdata->desc.bInterfaceClass != 10 || ifdata->desc.bNumEndpoints < 2) {
- ifcom = cfacm->interface[j + 1]->altsetting + 0;
+ /* We know we're probe()d with the control interface.
+ * FIXME ACM doesn't guarantee the data interface is
+ * adjacent to the control interface, or that if one
+ * is there it's not for call management ... so use
+ * the cdc union descriptor whenever there is one.
+ */
+ ifcom = intf->altsetting + 0;
+ if (intf == cfacm->interface[j]) {
+ ifdata = cfacm->interface[j + 1]->altsetting + 0;
+ data = cfacm->interface[j + 1];
+ } else if (intf == cfacm->interface[j + 1]) {
ifdata = cfacm->interface[j]->altsetting + 0;
- if (ifdata->desc.bInterfaceClass != 10 || ifdata->desc.bNumEndpoints < 2)
- continue;
- }
+ data = cfacm->interface[j];
+ } else
+ continue;
- if (ifcom->desc.bInterfaceClass != 2 || ifcom->desc.bInterfaceSubClass != 2 ||
- ifcom->desc.bInterfaceProtocol < 1 || ifcom->desc.bInterfaceProtocol > 6 ||
- ifcom->desc.bNumEndpoints < 1)
+ if (ifdata->desc.bInterfaceClass != 10 || ifdata->desc.bNumEndpoints < 2)
continue;
epctrl = &ifcom->endpoint[0].desc;
@@ -593,15 +601,6 @@
epwrite = &ifdata->endpoint[0].desc;
}
- /* FIXME don't scan every config. it's either correct
- * when we probe(), or some other task must fix this.
- */
- if (dev->actconfig != cfacm) {
- err("need inactive config #%d",
- cfacm->desc.bConfigurationValue);
- return -ENODEV;
- }
-
for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
if (acm_table[minor]) {
err("no more free acm devices");
@@ -617,7 +616,8 @@
ctrlsize = epctrl->wMaxPacketSize;
readsize = epread->wMaxPacketSize;
acm->writesize = epwrite->wMaxPacketSize;
- acm->iface = cfacm->interface[j];
+ acm->control = intf;
+ acm->data = data;
acm->minor = minor;
acm->dev = dev;
@@ -665,7 +665,7 @@
buf += readsize, acm->writesize, acm_write_bulk, acm);
acm->writeurb->transfer_flags |= URB_NO_FSBR;
- info("ttyACM%d: USB ACM device", minor);
+ dev_info(&intf->dev, "ttyACM%d: USB ACM device", minor);
acm_set_control(acm, acm->ctrlout);
@@ -673,8 +673,7 @@
acm->line.databits = 8;
acm_set_line(acm, &acm->line);
- usb_driver_claim_interface(&acm_driver, acm->iface + 0, acm);
- usb_driver_claim_interface(&acm_driver, acm->iface + 1, acm);
+ usb_driver_claim_interface(&acm_driver, data, acm);
tty_register_device(acm_tty_driver, minor, &intf->dev);
@@ -682,7 +681,6 @@
usb_set_intfdata (intf, acm);
return 0;
}
- }
return -EIO;
}
@@ -705,8 +703,7 @@
kfree(acm->ctrlurb->transfer_buffer);
- usb_driver_release_interface(&acm_driver, acm->iface + 0);
- usb_driver_release_interface(&acm_driver, acm->iface + 1);
+ usb_driver_release_interface(&acm_driver, acm->data);
if (!acm->used) {
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -727,8 +724,15 @@
*/
static struct usb_device_id acm_ids[] = {
- { USB_DEVICE_INFO(USB_CLASS_COMM, 0, 0) },
- { USB_DEVICE_INFO(USB_CLASS_COMM, 2, 0) },
+ /* control interfaces with various AT-command sets */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 1) },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 2) },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 3) },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 4) },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 5) },
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, 2, 6) },
+
+ /* NOTE: COMM/2/0xff is likely MSFT RNDIS ... NOT a modem!! */
{ }
};
@@ -736,7 +740,7 @@
static struct usb_driver acm_driver = {
.owner = THIS_MODULE,
- .name = "acm",
+ .name = "cdc_acm",
.probe = acm_probe,
.disconnect = acm_disconnect,
.id_table = acm_ids,
diff -Nru a/drivers/usb/core/message.c b/drivers/usb/core/message.c
--- a/drivers/usb/core/message.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/core/message.c Sat Oct 25 11:45:09 2003
@@ -1001,8 +1001,10 @@
int i, retval;
struct usb_host_config *config;
- /* dev->serialize guards all config changes */
- down(&dev->serialize);
+ /* caller must own dev->serialize (config won't change)
+ * and the usb bus readlock (so driver bindings are stable);
+ * so calls during probe() are fine
+ */
for (i = 1; i < 16; ++i) {
usb_disable_endpoint(dev, i);
@@ -1016,7 +1018,7 @@
NULL, 0, HZ * USB_CTRL_SET_TIMEOUT);
if (retval < 0) {
dev->state = USB_STATE_ADDRESS;
- goto done;
+ return retval;
}
dev->toggle[0] = dev->toggle[1] = 0;
@@ -1029,9 +1031,7 @@
intf->act_altsetting = 0;
usb_enable_interface(dev, intf);
}
-done:
- up(&dev->serialize);
- return (retval < 0) ? retval : 0;
+ return 0;
}
/**
diff -Nru a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
--- a/drivers/usb/core/usb.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/core/usb.c Sat Oct 25 11:45:09 2003
@@ -1063,6 +1063,9 @@
goto fail;
}
if (dev->speed == USB_SPEED_FULL) {
+ usb_disable_endpoint(dev, 0);
+ usb_endpoint_running(dev, 0, 1);
+ usb_endpoint_running(dev, 0, 0);
dev->epmaxpacketin [0] = dev->descriptor.bMaxPacketSize0;
dev->epmaxpacketout[0] = dev->descriptor.bMaxPacketSize0;
}
diff -Nru a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
--- a/drivers/usb/gadget/inode.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/gadget/inode.c Sat Oct 25 11:45:09 2003
@@ -812,7 +812,7 @@
if (dev->setup_out_error)
retval = -EIO;
else {
- len = min (len, dev->req->actual);
+ len = min (len, (size_t)dev->req->actual);
// FIXME don't call this with the spinlock held ...
if (copy_to_user (buf, &dev->req->buf, len))
retval = -EFAULT;
@@ -1670,7 +1670,7 @@
fail:
spin_unlock_irq (&dev->lock);
- pr_debug ("%s: %s fail %d, %p\n", shortname, __FUNCTION__, value, dev);
+ pr_debug ("%s: %s fail %Zd, %p\n", shortname, __FUNCTION__, value, dev);
kfree (dev->buf);
dev->buf = 0;
return value;
diff -Nru a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c
--- a/drivers/usb/media/vicam.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/media/vicam.c Sat Oct 25 11:45:09 2003
@@ -539,6 +539,7 @@
struct video_capability b;
DBG("VIDIOCGCAP\n");
+ memset(&b, 0, sizeof(b));
strcpy(b.name, "ViCam-based Camera");
b.type = VID_TYPE_CAPTURE;
b.channels = 1;
diff -Nru a/drivers/usb/misc/brlvger.c b/drivers/usb/misc/brlvger.c
--- a/drivers/usb/misc/brlvger.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/misc/brlvger.c Sat Oct 25 11:45:09 2003
@@ -711,6 +711,7 @@
case BRLVGER_GET_INFO: {
struct brlvger_info vi;
+ memset(&vi, 0, sizeof(vi));
strlcpy(vi.driver_version, DRIVER_VERSION,
sizeof(vi.driver_version));
strlcpy(vi.driver_banner, longbanner,
diff -Nru a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
--- a/drivers/usb/net/usbnet.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/net/usbnet.c Sat Oct 25 11:45:09 2003
@@ -493,8 +493,11 @@
(void *)req, data, size,
ax8817x_async_cmd_callback, req);
- if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0)
+ if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
deverr(dev, "Error submitting the control message: status=%d", status);
+ kfree(req);
+ usb_free_urb(urb);
+ }
}
static void ax8817x_set_multicast(struct net_device *net)
@@ -514,7 +517,7 @@
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- u8 *multi_filter = (u8 *)dev->data;
+ u8 *multi_filter = (u8 *)&dev->data;
struct dev_mc_list *mc_list = net->mc_list;
u32 crc_bits;
int i;
diff -Nru a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
--- a/drivers/usb/serial/digi_acceleport.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/serial/digi_acceleport.c Sat Oct 25 11:45:09 2003
@@ -1728,8 +1728,8 @@
init_waitqueue_head( &priv->dp_flush_wait );
priv->dp_in_close = 0;
init_waitqueue_head( &priv->dp_close_wait );
- INIT_WORK(&priv->dp_wakeup_work, (void *)digi_wakeup_write_lock,
- (void *)(&serial->port[i]));
+ INIT_WORK(&priv->dp_wakeup_work,
+ digi_wakeup_write_lock, serial->port[i]);
/* initialize write wait queue for this port */
init_waitqueue_head( &serial->port[i]->write_wait );
diff -Nru a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
--- a/drivers/usb/serial/ftdi_sio.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/serial/ftdi_sio.c Sat Oct 25 11:45:09 2003
@@ -17,6 +17,15 @@
* See http://ftdi-usb-sio.sourceforge.net for upto date testing info
* and extra documentation
*
+ * (21/Oct/2003) Ian Abbott
+ * Renamed some VID/PID macros for Matrix Orbital and Perle Systems
+ * devices. Removed Matrix Orbital and Perle Systems devices from the
+ * 8U232AM device table, but left them in the FT232BM table, as they are
+ * known to use only FT232BM.
+ *
+ * (17/Oct/2003) Scott Allen
+ * Added vid/pid for Perle Systems UltraPort USB serial converters
+ *
* (21/Sep/2003) Ian Abbott
* Added VID/PID for Omnidirectional Control Technology US101 USB to
* RS-232 adapter (also rebadged as Dick Smith Electronics XH6381).
@@ -285,13 +294,6 @@
{ USB_DEVICE_VER(FTDI_VID, FTDI_XF_642_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_VNHCPCUSB_D_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_DSS20_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_0_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_1_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_2_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_3_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_4_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_5_PID, 0, 0x3ff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_6_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2101_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2102_PID, 0, 0x3ff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2103_PID, 0, 0x3ff) },
@@ -358,13 +360,14 @@
{ USB_DEVICE_VER(FTDI_VID, FTDI_XF_642_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_VNHCPCUSB_D_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(FTDI_VID, FTDI_DSS20_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_0_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_1_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_2_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_3_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_4_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_5_PID, 0x400, 0xffff) },
- { USB_DEVICE_VER(FTDI_MTXORB_VID, FTDI_MTXORB_6_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_0_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_1_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_2_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_3_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_4_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_5_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_6_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2101_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2102_PID, 0x400, 0xffff) },
{ USB_DEVICE_VER(SEALEVEL_VID, SEALEVEL_2103_PID, 0x400, 0xffff) },
@@ -444,13 +447,14 @@
{ USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) },
{ USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_0_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_1_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_2_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_3_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_4_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_5_PID) },
- { USB_DEVICE(FTDI_MTXORB_VID, FTDI_MTXORB_6_PID) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_0_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_1_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_2_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_3_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_4_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_5_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_MTXORB_6_PID, 0x400, 0xffff) },
+ { USB_DEVICE_VER(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID, 0x400, 0xffff) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) },
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) },
diff -Nru a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
--- a/drivers/usb/serial/ftdi_sio.h Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/serial/ftdi_sio.h Sat Oct 25 11:45:09 2003
@@ -50,7 +50,6 @@
* The following are the values for the Matrix Orbital LCD displays,
* which are the FT232BM ( similar to the 8U232AM )
*/
-#define FTDI_MTXORB_VID FTDI_VID /* Matrix Orbital Product Id */
#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
@@ -58,6 +57,12 @@
#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
+
+/*
+ * The following are the values for the Perle Systems
+ * UltraPort USB serial converters
+ */
+#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
/*
* The following are the values for the Sealevel SeaLINK+ adapters.
diff -Nru a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
--- a/drivers/usb/serial/io_edgeport.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/serial/io_edgeport.c Sat Oct 25 11:45:09 2003
@@ -1906,6 +1906,7 @@
case TIOCGICOUNT:
cnow = edge_port->icount;
+ memset(&icount, 0, sizeof(icount));
icount.cts = cnow.cts;
icount.dsr = cnow.dsr;
icount.rng = cnow.rng;
diff -Nru a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
--- a/drivers/usb/storage/scsiglue.c Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/storage/scsiglue.c Sat Oct 25 11:45:09 2003
@@ -51,6 +51,7 @@
#include
#include
+#include
/***********************************************************************
@@ -64,10 +65,6 @@
static int slave_configure (struct scsi_device *sdev)
{
- /* set device to use 10-byte commands where possible */
- sdev->use_10_for_ms = 1;
- sdev->use_10_for_rw = 1;
-
/* this is to satisify the compiler, tho I don't think the
* return code is ever checked anywhere. */
return 0;
@@ -323,6 +320,9 @@
/* emulated HBA */
.emulated = TRUE,
+
+ /* modify scsi_device bits on probe */
+ .flags = (BLIST_MS_SKIP_PAGE_08 | BLIST_USE_10_BYTE_MS),
/* module management */
.module = THIS_MODULE
diff -Nru a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
--- a/drivers/usb/storage/unusual_devs.h Sat Oct 25 11:45:09 2003
+++ b/drivers/usb/storage/unusual_devs.h Sat Oct 25 11:45:09 2003
@@ -548,10 +548,10 @@
* - They don't like the INQUIRY command. So we must handle this command
* of the SCSI layer ourselves.
*/
-UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x5009,
+UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999,
"Casio",
"QV DigitalCamera",
- US_SC_8070, US_PR_CB, NULL,
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
/* Submitted by Hartmut Wahl */
@@ -631,6 +631,16 @@
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY),
+/*
+ * Entry for Jenoptik JD 5200z3
+ *
+ * email: car.busse@gmx.de
+ */
+UNUSUAL_DEV( 0x0d96, 0x5200, 0x0001, 0x0200,
+ "Jenoptik",
+ "JD 5200 z3",
+ US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
+
/* Reported by Kevin Cernekee
* Tested on hardware version 1.10.
* Entry is needed only for the initializer function override.
diff -Nru a/drivers/video/acornfb.c b/drivers/video/acornfb.c
--- a/drivers/video/acornfb.c Sat Oct 25 11:45:09 2003
+++ b/drivers/video/acornfb.c Sat Oct 25 11:45:09 2003
@@ -1415,7 +1415,7 @@
fb_info.monspecs.vfmin, fb_info.monspecs.vfmax,
fb_info.monspecs.dpms ? ", DPMS" : "");
- if (fb_set_var(&fb_info.var, &fb_info))
+ if (fb_set_var(&fb_info, &fb_info.var))
printk(KERN_ERR "Acornfb: unable to set display parameters\n");
if (register_framebuffer(&fb_info) < 0)
diff -Nru a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
--- a/fs/befs/linuxvfs.c Sat Oct 25 11:45:09 2003
+++ b/fs/befs/linuxvfs.c Sat Oct 25 11:45:09 2003
@@ -673,14 +673,15 @@
*
*/
enum {
- Opt_uid, Opt_gid, Opt_charset, Opt_debug,
+ Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
};
static match_table_t befs_tokens = {
{Opt_uid, "uid=%d"},
{Opt_gid, "gid=%d"},
{Opt_charset, "iocharset=%s"},
- {Opt_debug, "debug"}
+ {Opt_debug, "debug"},
+ {Opt_err, NULL}
};
static int
diff -Nru a/fs/binfmt_elf.c b/fs/binfmt_elf.c
--- a/fs/binfmt_elf.c Sat Oct 25 11:45:09 2003
+++ b/fs/binfmt_elf.c Sat Oct 25 11:45:09 2003
@@ -602,6 +602,10 @@
// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
interpreter_type = INTERPRETER_ELF;
}
+ /* Verify the interpreter has a valid arch */
+ if ((interpreter_type == INTERPRETER_ELF) &&
+ !elf_check_arch(&interp_elf_ex))
+ goto out_free_dentry;
} else {
/* Executables without an interpreter also need a personality */
SET_PERSONALITY(elf_ex, ibcs2_interpreter);
diff -Nru a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
--- a/fs/cifs/AUTHORS Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/AUTHORS Sat Oct 25 11:45:09 2003
@@ -20,6 +20,7 @@
Zwane Mwaikambo
Andi Kleen
Amrut Joshi
+Shobhit Dayal
Test case and Bug Report contributors
-------------------------------------
diff -Nru a/fs/cifs/CHANGES b/fs/cifs/CHANGES
--- a/fs/cifs/CHANGES Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/CHANGES Sat Oct 25 11:45:09 2003
@@ -1,3 +1,20 @@
+Version 0.94
+------------
+Fix to list processing in reopen_files. Fix reconnection when server hung
+but tcpip session still alive. Set proper timeout on socket read.
+
+Version 0.93
+------------
+Add missing mount options including iocharset. SMP fixes in write and open.
+Fix errors in reconnecting after TCP session failure. Fix module unloading
+of default nls codepage
+
+Version 0.92
+------------
+Active smb transactions should never go negative (fix double FreeXid). Fix
+list processing in file routines. Check return code on kmalloc in open.
+Fix spinlock usage for SMP.
+
Version 0.91
------------
Fix oops in reopen_files when invalid dentry. drop dentry on server rename
diff -Nru a/fs/cifs/README b/fs/cifs/README
--- a/fs/cifs/README Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/README Sat Oct 25 11:45:09 2003
@@ -127,6 +127,13 @@
this overrides the default mode for directory inodes.
port attempt to contact the server on this tcp port, before
trying the usual ports (port 445, then 139).
+ iocharset Codepage used to convert local path names to and from
+ Unicode. Unicode is used by default for network path
+ names if the server supports it. If iocharset is
+ not specified then the nls_default specified
+ during the local client kernel build will be used.
+ If server does not support Unicode, this parameter is
+ unused.
rsize default read size
wsize default write size
rw mount the network share read-write (note that the
diff -Nru a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
--- a/fs/cifs/cifs_debug.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/cifs_debug.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs_debug.c
*
- * Copyright (c) International Business Machines Corp., 2000,2002
+ * Copyright (C) International Business Machines Corp., 2000,2003
*
* Modified by Steve French (sfrench@us.ibm.com)
*
@@ -84,12 +84,12 @@
ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
length =
sprintf(buf,
- "\n%d) Name: %s Domain: %s Mounts: %d ServerOS: %s ServerNOS: %s\n\tCapabilities: 0x%x",
+ "\n%d) Name: %s Domain: %s Mounts: %d ServerOS: %s \n\tServerNOS: %s\tCapabilities: 0x%x\n\tSMB session status: %d\tTCP session status: %d",
i, ses->serverName, ses->serverDomain, atomic_read(&ses->inUse),
- ses->serverOS, ses->serverNOS, ses->capabilities);
+ ses->serverOS, ses->serverNOS, ses->capabilities,ses->status,ses->server ? ses->server->tcpStatus : -1);
buf += length;
if(ses->server)
- buf += sprintf(buf, "\tLocal Users To Same Server: %d SecMode: 0x%x",
+ buf += sprintf(buf, "\n\tLocal Users To Same Server: %d SecMode: 0x%x",
atomic_read(&ses->server->socketUseCount),ses->server->secMode);
}
read_unlock(&GlobalSMBSeslock);
@@ -106,13 +106,13 @@
tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList);
length =
sprintf(buf,
- "\n%d) %s Uses: %d on FS: %s with characteristics: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d",
+ "\n%d) %s Uses: %d on FS: %s with characteristics: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d",
i, tcon->treeName,
atomic_read(&tcon->useCount),
tcon->nativeFileSystem,
tcon->fsDevInfo.DeviceCharacteristics,
tcon->fsAttrInfo.Attributes,
- tcon->fsAttrInfo.MaxPathNameComponentLength);
+ tcon->fsAttrInfo.MaxPathNameComponentLength,tcon->tidStatus);
buf += length;
if (tcon->fsDevInfo.DeviceType == FILE_DEVICE_DISK)
length = sprintf(buf, " type: DISK ");
@@ -123,6 +123,8 @@
sprintf(buf, " type: %d ",
tcon->fsDevInfo.DeviceType);
buf += length;
+ if(tcon->tidStatus == CifsNeedReconnect)
+ buf += sprintf(buf, "\tDISCONNECTED ");
}
read_unlock(&GlobalSMBSeslock);
length = sprintf(buf, "\n");
diff -Nru a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
--- a/fs/cifs/cifsencrypt.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/cifsencrypt.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsencrypt.c
*
- * Copyright (c) International Business Machines Corp., 2003
+ * Copyright (C) International Business Machines Corp., 2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -64,13 +64,13 @@
if((le32_to_cpu(cifs_pdu->Flags2) & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(ses->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
*pexpected_response_sequence_number = ses->sequence_number++;
ses->sequence_number++;
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
rc = cifs_calculate_signature(cifs_pdu, ses->mac_signing_key,smb_signature);
if(rc)
diff -Nru a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
--- a/fs/cifs/cifsfs.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/cifsfs.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsfs.c
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Common Internet FileSystem (CIFS) client
@@ -81,7 +81,6 @@
cifs_sb = CIFS_SB(sb);
if(cifs_sb == NULL)
return -ENOMEM;
- cifs_sb->local_nls = load_nls_default(); /* needed for ASCII cp to Unicode converts */
rc = cifs_mount(sb, cifs_sb, data, devname);
@@ -571,8 +570,6 @@
static int cifs_oplock_thread(void * dummyarg)
{
- struct list_head * tmp;
- struct list_head * tmp1;
struct oplock_q_entry * oplock_item;
struct cifsTconInfo *pTcon;
struct inode * inode;
@@ -585,19 +582,22 @@
oplockThread = current;
do {
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(100*HZ);
- /* BB add missing code */
- write_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp1, &GlobalOplock_Q) {
- oplock_item = list_entry(tmp, struct oplock_q_entry,
- qhead);
+
+ schedule_timeout(39*HZ);
+ spin_lock(&GlobalMid_Lock);
+ if(list_empty(&GlobalOplock_Q)) {
+ spin_unlock(&GlobalMid_Lock);
+ schedule_timeout(39*HZ);
+ } else {
+ oplock_item = list_entry(GlobalOplock_Q.next,
+ struct oplock_q_entry, qhead);
if(oplock_item) {
pTcon = oplock_item->tcon;
inode = oplock_item->pinode;
netfid = oplock_item->netfid;
+ spin_unlock(&GlobalMid_Lock);
DeleteOplockQEntry(oplock_item);
- write_unlock(&GlobalMid_Lock);
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode))
rc = filemap_fdatawrite(inode->i_mapping);
else
rc = 0;
@@ -609,11 +609,9 @@
0, LOCKING_ANDX_OPLOCK_RELEASE,
0 /* wait flag */);
cFYI(1,("Oplock release rc = %d ",rc));
- write_lock(&GlobalMid_Lock);
} else
- break;
+ spin_unlock(&GlobalMid_Lock);
}
- write_unlock(&GlobalMid_Lock);
} while(!signal_pending(current));
complete_and_exit (&cifs_oplock_exited, 0);
}
@@ -640,7 +638,7 @@
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
GlobalSMBSeslock = RW_LOCK_UNLOCKED;
- GlobalMid_Lock = RW_LOCK_UNLOCKED;
+ GlobalMid_Lock = SPIN_LOCK_UNLOCKED;
rc = cifs_init_inodecache();
if (!rc) {
diff -Nru a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
--- a/fs/cifs/cifsglob.h Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/cifsglob.h Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsglob.h
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -212,6 +212,7 @@
int endOfSearch:1; /* we have reached end of search */
int closePend:1; /* file is marked to close */
int emptyDir:1;
+ int invalidHandle:1; /* file closed via session abend */
char * search_resume_name;
unsigned int resume_name_length;
__u32 resume_key;
@@ -294,7 +295,27 @@
* following to be declared.
*/
-/* BB Every global should have an associated mutex for safe update BB */
+/****************************************************************************
+ * Locking notes. All updates to global variables and lists should be
+ * protected by spinlocks or semaphores.
+ *
+ * Spinlocks
+ * ---------
+ * GlobalMid_Lock protects:
+ * list operations on pending_mid_q and oplockQ
+ * updates to XID counters, multiplex id and SMB sequence numbers
+ * GlobalSMBSesLock protects:
+ * list operations on tcp and SMB session lists and tCon lists
+ * f_owner.lock protects certain per file struct operations
+ * mapping->page_lock protects certain per page operations
+ *
+ * Semaphores
+ * ----------
+ * sesSem operations on smb session
+ * tconSem operations on tree connection
+ * i_sem inode operations
+ *
+ ****************************************************************************/
#ifdef DECLARE_GLOBALS_HERE
#define GLOBAL_EXTERN
@@ -327,7 +348,7 @@
GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
-GLOBAL_EXTERN rwlock_t GlobalMid_Lock; /* protects above and list operations */
+GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above and list operations */
/* on midQ entries */
GLOBAL_EXTERN char Local_System_Name[15];
diff -Nru a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
--- a/fs/cifs/cifssmb.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/cifssmb.c Sat Oct 25 11:45:09 2003
@@ -62,6 +62,7 @@
if(!rc)
reopen_files(tcon,nls_codepage);
}
+ unload_nls(nls_codepage);
}
}
if(rc)
@@ -821,7 +822,6 @@
} else {
len_of_str = cifs_strtoUCS((wchar_t *) rename_info->target_name, target_name, 530, nls_codepage);
}
- cFYI(1,("len of str: %d", len_of_str)); /* BB removeme BB */
rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
pSMB->DataCount = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str) + 2;
pSMB->ByteCount += pSMB->DataCount;
diff -Nru a/fs/cifs/connect.c b/fs/cifs/connect.c
--- a/fs/cifs/connect.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/connect.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -52,6 +52,8 @@
char *domainname;
char *UNC;
char *UNCip;
+ char *iocharset; /* local code page for mapping to and from Unicode */
+ char *source_rfc1001_name; /* netbios name of client */
uid_t linux_uid;
gid_t linux_gid;
mode_t file_mode;
@@ -59,6 +61,7 @@
int rw;
unsigned int rsize;
unsigned int wsize;
+ unsigned int sockopt;
unsigned short int port;
};
@@ -81,7 +84,9 @@
struct list_head *tmp;
struct cifsSesInfo *ses;
struct cifsTconInfo *tcon;
-
+
+ if(server->tcpStatus == CifsExiting)
+ return rc;
server->tcpStatus = CifsNeedReconnect;
server->maxBuf = 0;
@@ -182,23 +187,26 @@
sizeof (struct smb_hdr) -
1 /* RFC1001 header and SMB header */ ,
MSG_PEEK /* flags see socket.h */ );
- if (length < 0) {
- if (length == -ECONNRESET) {
- cERROR(1, ("Connection reset by peer "));
- cifs_reconnect(server);
- csocket = server->ssocket;
- continue;
- } else { /* find define for the -512 returned at unmount time */
- cFYI(1,("Error on sock_recvmsg(peek) length = %d",
- length));
- }
+
+ if(server->tcpStatus == CifsExiting) {
break;
- } else if (length == 0) {
- cFYI(1,("Zero length peek received - dead session?"));
+ } else if (server->tcpStatus == CifsNeedReconnect) {
+ cFYI(1,("Reconnecting after server stopped responding"));
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ continue;
+ } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
+ schedule_timeout(1); /* minimum sleep to prevent looping
+ allowing socket to clear and app threads to set
+ tcpStatus CifsNeedReconnect if server hung */
+ continue;
+ } else if (length <= 0) {
+ cFYI(1,("Reconnecting after unexpected rcvmsg error "));
cifs_reconnect(server);
csocket = server->ssocket;
continue;
}
+
pdu_length = 4 + ntohl(smb_buffer->smb_buf_length);
cFYI(1, ("Peek length rcvd: %d with smb length: %d", length, pdu_length));
@@ -222,7 +230,9 @@
cERROR(1,
("Unknown RFC 1001 frame received not 0x00 nor 0x85"));
cifs_dump_mem(" Received Data is: ", temp, length);
- break;
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ continue;
} else {
if ((length != sizeof (struct smb_hdr) - 1)
|| (pdu_length >
@@ -236,11 +246,12 @@
("Invalid size or format for SMB found with length %d and pdu_lenght %d",
length, pdu_length));
cifs_dump_mem("Received Data is: ",temp,sizeof(struct smb_hdr));
- /* BB fix by finding next smb signature - and reading off data until next smb ? BB */
-
- /* BB add reconnect here */
-
- break;
+ /* could we fix this network corruption by finding next
+ smb header (instead of killing the session) and
+ restart reading from next valid SMB found? */
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ continue;
} else { /* length ok */
length = 0;
@@ -248,19 +259,16 @@
iov.iov_len = pdu_length;
for (total_read = 0; total_read < pdu_length; total_read += length) {
/* Should improve check for buffer overflow with bad pdu_length */
- /* iov.iov_base = smb_buffer+total_read;
- iov.iov_len = pdu_length-total_read; */
length = sock_recvmsg(csocket, &smb_msg,
pdu_length - total_read, 0);
- /* cERROR(1,("For iovlen %d Length received: %d with total read %d",
- iov.iov_len, length,total_read)); */
if (length == 0) {
cERROR(1,
("Zero length receive when expecting %d ",
pdu_length - total_read));
- /* BB add reconnect here */
- break;
- }
+ cifs_reconnect(server);
+ csocket = server->ssocket;
+ continue;
+ }
}
}
@@ -272,7 +280,7 @@
}
task_to_wake = NULL;
- read_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct
mid_q_entry,
@@ -288,7 +296,7 @@
MID_RESPONSE_RECEIVED;
}
}
- read_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
if (task_to_wake) {
smb_buffer = NULL; /* will be freed by users thread after he is done */
wake_up_process(task_to_wake);
@@ -432,6 +440,20 @@
printk(KERN_WARNING "CIFS: domain name too long\n");
return 1;
}
+ } else if (strnicmp(data, "iocharset", 9) == 0) {
+ if (!value || !*value) {
+ printk(KERN_WARNING "CIFS: invalid iocharset specified\n");
+ return 1; /* needs_arg; */
+ }
+ if (strnlen(value, 65) < 65) {
+ if(strnicmp(value,"default",7))
+ vol->iocharset = value;
+ /* if iocharset not set load_nls_default used by caller */
+ cFYI(1, ("iocharset set to %s",value));
+ } else {
+ printk(KERN_WARNING "CIFS: iocharset name too long.\n");
+ return 1;
+ }
} else if (strnicmp(data, "uid", 3) == 0) {
if (value && *value) {
vol->linux_uid =
@@ -467,6 +489,19 @@
vol->wsize =
simple_strtoul(value, &value, 0);
}
+ } else if (strnicmp(data, "sockopt", 5) == 0) {
+ if (value && *value) {
+ vol->sockopt =
+ simple_strtoul(value, &value, 0);
+ }
+ } else if (strnicmp(data, "netbiosname", 4) == 0) {
+ if (!value || !*value) {
+ vol->source_rfc1001_name = NULL;
+ } else if (strnlen(value, 17) < 17) {
+ vol->source_rfc1001_name = value;
+ } else {
+ printk(KERN_WARNING "CIFS: netbiosname too long (more than 15)\n");
+ }
} else if (strnicmp(data, "version", 3) == 0) {
/* ignore */
} else if (strnicmp(data, "rw", 2) == 0) {
@@ -722,13 +757,13 @@
int rc = 0;
if(*csocket == NULL) {
- rc = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, csocket);
- if (rc < 0) {
+ rc = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, csocket);
+ if (rc < 0) {
cERROR(1, ("Error %d creating socket",rc));
*csocket = NULL;
return rc;
- } else {
- /* BB other socket options to set KEEPALIVE, timeouts? NODELAY? */
+ } else {
+ /* BB other socket options to set KEEPALIVE, NODELAY? */
cFYI(1,("Socket created"));
}
}
@@ -763,6 +798,11 @@
}
}
+ /* Eventually check for other socket options to change from
+ the default. sock_setsockopt not used because it expects
+ user space buffer */
+ (*csocket)->sk->sk_rcvtimeo = 8 * HZ;
+
return rc;
}
@@ -863,6 +903,19 @@
return -EINVAL;
}
+ /* this is needed for ASCII cp to Unicode converts */
+ if(volume_info.iocharset == NULL) {
+ cifs_sb->local_nls = load_nls_default();
+ /* load_nls_default can not return null */
+ } else {
+ cifs_sb->local_nls = load_nls(volume_info.iocharset);
+ if(cifs_sb->local_nls == NULL) {
+ cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset));
+ FreeXid(xid);
+ return -ELIBACC;
+ }
+ }
+
existingCifsSes =
find_tcp_session(sin_server.sin_addr.s_addr,
volume_info.username, &srvTcp);
@@ -999,6 +1052,8 @@
/* on error free sesinfo and tcon struct if needed */
if (rc) {
+ if(atomic_read(&srvTcp->socketUseCount) == 0)
+ srvTcp->tcpStatus = CifsExiting;
/* If find_unc succeeded then rc == 0 so we can not end */
if (tcon) /* up here accidently freeing someone elses tcon struct */
tconInfoFree(tcon);
diff -Nru a/fs/cifs/dir.c b/fs/cifs/dir.c
--- a/fs/cifs/dir.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/dir.c Sat Oct 25 11:45:09 2003
@@ -218,6 +218,8 @@
pCifsFile->netfid = fileHandle;
pCifsFile->pid = current->tgid;
pCifsFile->pInode = newinode;
+ pCifsFile->invalidHandle = FALSE;
+ pCifsFile->closePend = FALSE;
/* pCifsFile->pfile = file; */ /* put in at open time */
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist,&pTcon->openFileList);
diff -Nru a/fs/cifs/file.c b/fs/cifs/file.c
--- a/fs/cifs/file.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/file.c Sat Oct 25 11:45:09 2003
@@ -122,6 +122,12 @@
and calling get_inode_info with returned buf (at least
helps non-Unix server case */
buf = kmalloc(sizeof(FILE_ALL_INFO),GFP_KERNEL);
+ if(buf==0) {
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+ return -ENOMEM;
+ }
rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
CREATE_NOT_DIR, &netfid, &oplock, buf, cifs_sb->local_nls);
if (rc) {
@@ -138,6 +144,8 @@
pCifsFile->pid = current->pid;
pCifsFile->pfile = file; /* needed for writepage */
pCifsFile->pInode = inode;
+ pCifsFile->invalidHandle = FALSE;
+ pCifsFile->closePend = FALSE;
write_lock(&file->f_owner.lock);
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist,&pTcon->openFileList);
@@ -200,48 +208,154 @@
return rc;
}
+static int cifs_reopen_file(struct inode *inode, struct file *file)
+{
+ int rc = -EACCES;
+ int xid, oplock;
+ struct cifs_sb_info *cifs_sb;
+ struct cifsTconInfo *pTcon;
+ struct cifsFileInfo *pCifsFile;
+ struct cifsInodeInfo *pCifsInode;
+ char *full_path = NULL;
+ int desiredAccess = 0x20197;
+ int disposition = FILE_OPEN;
+ __u16 netfid;
+ FILE_ALL_INFO * buf = NULL;
+
+ xid = GetXid();
+
+ cifs_sb = CIFS_SB(inode->i_sb);
+ pTcon = cifs_sb->tcon;
+
+ full_path = build_path_from_dentry(file->f_dentry);
+
+ cFYI(1, (" inode = 0x%p file flags are 0x%x for %s", inode, file->f_flags,full_path));
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ desiredAccess = GENERIC_READ;
+ else if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+ desiredAccess = GENERIC_WRITE;
+ else if ((file->f_flags & O_ACCMODE) == O_RDWR)
+ desiredAccess = GENERIC_ALL;
+ if (oplockEnabled)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = FALSE;
+
+ /* BB pass O_SYNC flag through on file attributes .. BB */
+
+ /* Also refresh inode by passing in file_info buf returned by SMBOpen
+ and calling get_inode_info with returned buf (at least
+ helps non-Unix server case */
+ buf = kmalloc(sizeof(FILE_ALL_INFO),GFP_KERNEL);
+ if(buf==0) {
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+ return -ENOMEM;
+ }
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
+ CREATE_NOT_DIR, &netfid, &oplock, buf, cifs_sb->local_nls);
+ if (rc) {
+ cFYI(1, ("cifs_open returned 0x%x ", rc));
+ cFYI(1, ("oplock: %d ", oplock));
+ } else {
+ if (file->private_data) {
+ pCifsFile = (struct cifsFileInfo *) file->private_data;
+
+ pCifsFile->netfid = netfid;
+ pCifsFile->invalidHandle = FALSE;
+ pCifsInode = CIFS_I(file->f_dentry->d_inode);
+ if(pCifsInode) {
+ if (pTcon->ses->capabilities & CAP_UNIX)
+ rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
+ full_path, inode->i_sb);
+ else
+ rc = cifs_get_inode_info(&file->f_dentry->d_inode,
+ full_path, buf, inode->i_sb);
+
+ if(oplock == OPLOCK_EXCLUSIVE) {
+ pCifsInode->clientCanCacheAll = TRUE;
+ pCifsInode->clientCanCacheRead = TRUE;
+ cFYI(1,("Exclusive Oplock granted on inode %p",file->f_dentry->d_inode));
+ } else if(oplock == OPLOCK_READ) {
+ pCifsInode->clientCanCacheRead = TRUE;
+ pCifsInode->clientCanCacheAll = FALSE;
+ } else {
+ pCifsInode->clientCanCacheRead = FALSE;
+ pCifsInode->clientCanCacheAll = FALSE;
+ }
+ }
+ } else
+ rc = -EBADF;
+ }
+
+ if (buf)
+ kfree(buf);
+ if (full_path)
+ kfree(full_path);
+ FreeXid(xid);
+ return rc;
+}
+
/* Try to reopen files that were closed when session to server was lost */
int reopen_files(struct cifsTconInfo * pTcon, struct nls_table * nlsinfo)
{
int rc = 0;
struct cifsFileInfo *open_file = NULL;
struct file * file = NULL;
- struct list_head *tmp;
- struct list_head *tmp1;
+ struct list_head invalid_file_list;
+ struct list_head * tmp;
+ struct list_head * tmp1;
+
+ INIT_LIST_HEAD(&invalid_file_list);
-/* list all files open on tree connection */
- read_lock(&GlobalSMBSeslock);
+/* list all files open on tree connection and mark them invalid */
+ write_lock(&GlobalSMBSeslock);
list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
open_file = list_entry(tmp,struct cifsFileInfo, tlist);
if(open_file) {
- if(open_file->search_resume_name) {
- kfree(open_file->search_resume_name);
+ open_file->invalidHandle = TRUE;
+ list_move(&open_file->tlist,&invalid_file_list);
+ }
+ }
+
+ /* reopen files */
+ list_for_each_safe(tmp,tmp1, &invalid_file_list) {
+ /* BB need to fix above to check list end and skip entries we do not need to reopen */
+ open_file = list_entry(tmp,struct cifsFileInfo, tlist);
+ if(open_file == NULL) {
+ break;
+ } else {
+ if((open_file->invalidHandle == FALSE) &&
+ (open_file->closePend == FALSE)) {
+ list_move(&open_file->tlist,&pTcon->openFileList);
+ continue;
}
file = open_file->pfile;
- list_del(&open_file->flist);
- list_del(&open_file->tlist);
- kfree(open_file);
- if(file) {
- file->private_data = NULL;
- read_unlock(&GlobalSMBSeslock);
- if(file->f_dentry == 0) {
- cFYI(1,("Null dentry for file %p",file));
- read_lock(&GlobalSMBSeslock);
+ if(file->f_dentry == 0) {
+ cFYI(1,("Null dentry for file %p",file));
+ } else {
+ write_unlock(&GlobalSMBSeslock);
+ rc = cifs_reopen_file(file->f_dentry->d_inode,file);
+ write_lock(&GlobalSMBSeslock);
+ if(file->private_data == NULL) {
+ tmp = invalid_file_list.next;
+ tmp1 = tmp->next;
+ continue;
+ }
+
+ list_move(&open_file->tlist,&pTcon->openFileList);
+ if(rc) {
+ cFYI(1,("reconnecting file %s failed with %d",
+ file->f_dentry->d_name.name,rc));
} else {
- rc = cifs_open(file->f_dentry->d_inode,file);
- read_lock(&GlobalSMBSeslock);
- if(rc) {
- cFYI(1,("reconnecting file %s failed with %d",
- file->f_dentry->d_name.name,rc));
- } else {
- cFYI(1,("reconnection of %s succeeded",
- file->f_dentry->d_name.name));
- }
- }
+ cFYI(1,("reconnection of %s succeeded",
+ file->f_dentry->d_name.name));
+ }
}
}
}
- read_unlock(&GlobalSMBSeslock);
+ write_unlock(&GlobalSMBSeslock);
return rc;
}
@@ -260,11 +374,20 @@
cifs_sb = CIFS_SB(inode->i_sb);
pTcon = cifs_sb->tcon;
if (pSMBFile) {
+ pSMBFile->closePend = TRUE;
write_lock(&file->f_owner.lock);
+ if(pTcon) {
+ /* no sense reconnecting to close a file that is
+ already closed */
+ if (pTcon->tidStatus != CifsNeedReconnect) {
+ write_unlock(&file->f_owner.lock);
+ rc = CIFSSMBClose(xid,pTcon,pSMBFile->netfid);
+ write_lock(&file->f_owner.lock);
+ }
+ }
list_del(&pSMBFile->flist);
list_del(&pSMBFile->tlist);
write_unlock(&file->f_owner.lock);
- rc = CIFSSMBClose(xid, pTcon, pSMBFile->netfid);
if(pSMBFile->search_resume_name)
kfree(pSMBFile->search_resume_name);
kfree(file->private_data);
@@ -447,7 +570,6 @@
&bytes_written,
write_data + total_written, long_op);
if (rc || (bytes_written == 0)) {
- FreeXid(xid);
if (total_written)
break;
else {
@@ -492,7 +614,6 @@
/* figure out which file struct to use
if (file->private_data == NULL) {
- kunmap(page);
FreeXid(xid);
return -EBADF;
}
@@ -528,7 +649,7 @@
cifsInode = CIFS_I(mapping->host);
- read_lock(&GlobalSMBSeslock);
+ read_lock(&GlobalSMBSeslock);
list_for_each_safe(tmp, tmp1, &cifsInode->openFileList) {
open_file = list_entry(tmp,struct cifsFileInfo, flist);
/* We check if file is open for writing first */
@@ -546,6 +667,13 @@
} else if(bytes_written < 0) {
rc = bytes_written;
}
+ break; /* now that we found a valid file handle
+ and tried to write to it we are done, no
+ sense continuing to loop looking for another */
+ }
+ if(tmp->next == NULL) {
+ cFYI(1,("File instance %p removed",tmp));
+ break;
}
}
read_unlock(&GlobalSMBSeslock);
@@ -836,6 +964,7 @@
for(i = 0;ipage_lock);
if(list_empty(page_list)) {
+ spin_unlock(&mapping->page_lock);
break;
}
page = list_entry(page_list->prev, struct page, list);
@@ -1236,8 +1365,10 @@
rc = 0;
break;
}
- } else
+ } else {
+ cifsFile->invalidHandle = TRUE;
CIFSFindClose(xid, pTcon, cifsFile->netfid);
+ }
if(cifsFile->search_resume_name) {
kfree(cifsFile->search_resume_name);
cifsFile->search_resume_name = NULL;
@@ -1261,6 +1392,7 @@
cifsFile =
(struct cifsFileInfo *) file->private_data;
cifsFile->netfid = searchHandle;
+ cifsFile->invalidHandle = FALSE;
} else {
rc = -ENOMEM;
break;
diff -Nru a/fs/cifs/misc.c b/fs/cifs/misc.c
--- a/fs/cifs/misc.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/misc.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/misc.c
*
- * Copyright (c) International Business Machines Corp., 2002,2003
+ * Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -42,23 +42,23 @@
{
unsigned int xid;
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
GlobalTotalActiveXid++;
if (GlobalTotalActiveXid > GlobalMaxActiveXid)
GlobalMaxActiveXid = GlobalTotalActiveXid; /* keep high water mark for number of simultaneous vfs ops in our filesystem */
xid = GlobalCurrentXid++;
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
return xid;
}
void
_FreeXid(unsigned int xid)
{
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
/* if(GlobalTotalActiveXid == 0)
BUG(); */
GlobalTotalActiveXid--;
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
}
struct cifsSesInfo *
@@ -217,10 +217,10 @@
buffer->Pid = tmp & 0xFFFF;
tmp >>= 16;
buffer->PidHigh = tmp & 0xFFFF;
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
GlobalMid++;
buffer->Mid = GlobalMid;
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
if (treeCon) {
buffer->Tid = treeCon->tid;
if (treeCon->ses) {
diff -Nru a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
--- a/fs/cifs/smbdes.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/smbdes.c Sat Oct 25 11:45:09 2003
@@ -6,7 +6,7 @@
SMB authentication protocol
Copyright (C) Andrew Tridgell 1998
- Modified by Steve French (sfrench@us.ibm.com) 2002
+ Modified by Steve French (sfrench@us.ibm.com) 2002,2003
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -375,42 +375,4 @@
smbhash(out, in, key, forw);
key2[0] = key[7];
smbhash(out + 8, in + 8, key2, forw);
-}
-
-void
-SamOEMhash(unsigned char *data, unsigned char *key, int val)
-{
- unsigned char s_box[256];
- unsigned char index_i = 0;
- unsigned char index_j = 0;
- unsigned char j = 0;
- int ind;
-
- for (ind = 0; ind < 256; ind++) {
- s_box[ind] = (unsigned char) ind;
- }
-
- for (ind = 0; ind < 256; ind++) {
- unsigned char tc;
-
- j += (s_box[ind] + key[ind % 16]);
-
- tc = s_box[ind];
- s_box[ind] = s_box[j];
- s_box[j] = tc;
- }
- for (ind = 0; ind < val; ind++) {
- unsigned char tc;
- unsigned char t;
-
- index_i++;
- index_j += s_box[index_i];
-
- tc = s_box[index_i];
- s_box[index_i] = s_box[index_j];
- s_box[index_j] = tc;
-
- t = s_box[index_i] + s_box[index_j];
- data[ind] = data[ind] ^ s_box[t];
- }
}
diff -Nru a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
--- a/fs/cifs/smbencrypt.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/smbencrypt.c Sat Oct 25 11:45:09 2003
@@ -63,7 +63,6 @@
void cred_hash2(unsigned char *out, unsigned char *in, unsigned char *key);
void cred_hash3(unsigned char *out, unsigned char *in, unsigned char *key,
int forw);
-void SamOEMhash(unsigned char *data, unsigned char *key, int val);
/*The following definitions come from libsmb/smbencrypt.c */
@@ -75,8 +74,6 @@
void NTLMSSPOWFencrypt(unsigned char passwd[8],
unsigned char *ntlmchalresp, unsigned char p24[24]);
void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
-int make_oem_passwd_hash(char data[516], const char *passwd,
- unsigned char old_pw_hash[16], int unicode);
int decode_pw_buffer(char in_buffer[516], char *new_pwrd,
int new_pwrd_size, __u32 * new_pw_len);
@@ -344,46 +341,6 @@
/* data_blob_free(&lmv2_client_data); */ /* BB fix BB */
return final_response;
-}
-
-int make_oem_passwd_hash(char data[516], const char *passwd,
- unsigned char old_pw_hash[16], int unicode)
-{
- int new_pw_len = strlen(passwd) * (unicode ? 2 : 1);
-
- if (new_pw_len > 512) {
- cERROR(1,
- ("CIFS make_oem_passwd_hash: new password is too long."));
- return FALSE;
- }
-
- /*
- * Now setup the data area.
- * We need to generate a random fill
- * for this area to make it harder to
- * decrypt. JRA.
- *
- */
- get_random_bytes(data, sizeof (data));
- if (unicode) {
- /* Note that passwd should be in DOS oem character set. */
- /* dos_struni2( &data[512 - new_pw_len], passwd, 512); */
- cifs_strtoUCS((wchar_t *) & data[512 - new_pw_len], passwd, 512, /* struct nls_table */
- load_nls_default());
- /* BB call unload_nls now or get nls differntly */
- } else {
- /* Note that passwd should be in DOS oem character set. */
- strcpy(&data[512 - new_pw_len], passwd);
- }
- SIVAL(data, 512, new_pw_len);
-
-#ifdef DEBUG_PASSWORD
- DEBUG(100, ("make_oem_passwd_hash\n"));
- dump_data(100, data, 516);
-#endif
- SamOEMhash((unsigned char *) data, (unsigned char *) old_pw_hash, 516);
-
- return TRUE;
}
void
diff -Nru a/fs/cifs/transport.c b/fs/cifs/transport.c
--- a/fs/cifs/transport.c Sat Oct 25 11:45:09 2003
+++ b/fs/cifs/transport.c Sat Oct 25 11:45:09 2003
@@ -1,7 +1,7 @@
/*
* fs/cifs/transport.c
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -39,7 +39,6 @@
struct mid_q_entry *temp;
int timeout = 10 * HZ;
-/* BB add spinlock to protect midq for each session BB */
if (ses == NULL) {
cERROR(1, ("Null session passed in to AllocMidQEntry "));
return NULL;
@@ -72,11 +71,11 @@
}
if (ses->server->tcpStatus == CifsGood) {
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
list_add_tail(&temp->qhead, &ses->server->pending_mid_q);
atomic_inc(&midCount);
temp->midState = MID_REQUEST_ALLOCATED;
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
} else {
cERROR(1,("Need to reconnect after session died to server"));
if (temp)
@@ -89,12 +88,11 @@
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
- /* BB add spinlock to protect midq for each session BB */
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
midEntry->midState = MID_FREE;
list_del(&midEntry->qhead);
atomic_dec(&midCount);
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
buf_release(midEntry->resp_buf);
kmem_cache_free(cifs_mid_cachep, midEntry);
}
@@ -115,9 +113,9 @@
temp->pinode = pinode;
temp->tcon = tcon;
temp->netfid = fid;
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
list_add_tail(&temp->qhead, &GlobalOplock_Q);
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
}
return temp;
@@ -125,11 +123,10 @@
void DeleteOplockQEntry(struct oplock_q_entry * oplockEntry)
{
- /* BB add spinlock to protect midq for each session BB */
- write_lock(&GlobalMid_Lock);
+ spin_lock(&GlobalMid_Lock);
/* should we check if list empty first? */
list_del(&oplockEntry->qhead);
- write_unlock(&GlobalMid_Lock);
+ spin_unlock(&GlobalMid_Lock);
kmem_cache_free(cifs_oplock_cachep, oplockEntry);
}
@@ -240,6 +237,7 @@
else {
cFYI(1,("No response buffer"));
DeleteMidQEntry(midQ);
+ ses->server->tcpStatus = CifsNeedReconnect;
return -EIO;
}
}
diff -Nru a/fs/dquot.c b/fs/dquot.c
--- a/fs/dquot.c Sat Oct 25 11:45:09 2003
+++ b/fs/dquot.c Sat Oct 25 11:45:09 2003
@@ -826,28 +826,49 @@
}
/*
- * Release all quota for the specified inode.
- *
- * Note: this is a blocking operation.
+ * Remove references to quota from inode
+ * This function needs dqptr_sem for writing
*/
-static void dquot_drop_nolock(struct inode *inode)
+static void dquot_drop_iupdate(struct inode *inode, struct dquot **to_drop)
{
int cnt;
inode->i_flags &= ~S_QUOTA;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] == NODQUOT)
- continue;
- dqput(inode->i_dquot[cnt]);
+ to_drop[cnt] = inode->i_dquot[cnt];
inode->i_dquot[cnt] = NODQUOT;
}
}
+/*
+ * Release all quotas referenced by inode
+ */
void dquot_drop(struct inode *inode)
{
+ struct dquot *to_drop[MAXQUOTAS];
+ int cnt;
+
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- dquot_drop_nolock(inode);
+ dquot_drop_iupdate(inode, to_drop);
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (to_drop[cnt] != NODQUOT)
+ dqput(to_drop[cnt]);
+}
+
+/*
+ * Release all quotas referenced by inode.
+ * This function assumes dqptr_sem for writing
+ */
+void dquot_drop_nolock(struct inode *inode)
+{
+ struct dquot *to_drop[MAXQUOTAS];
+ int cnt;
+
+ dquot_drop_iupdate(inode, to_drop);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (to_drop[cnt] != NODQUOT)
+ dqput(to_drop[cnt]);
}
/*
@@ -862,6 +883,10 @@
warntype[cnt] = NOWARN;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return QUOTA_OK;
+ }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
@@ -894,6 +919,10 @@
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = NOWARN;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return QUOTA_OK;
+ }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
@@ -923,6 +952,10 @@
unsigned int cnt;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return;
+ }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
@@ -942,6 +975,10 @@
unsigned int cnt;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return;
+ }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
diff -Nru a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
--- a/fs/hugetlbfs/inode.c Sat Oct 25 11:45:09 2003
+++ b/fs/hugetlbfs/inode.c Sat Oct 25 11:45:09 2003
@@ -412,10 +412,18 @@
static int hugetlbfs_mknod(struct inode *dir,
struct dentry *dentry, int mode, dev_t dev)
{
- struct inode *inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
- current->fsgid, mode, dev);
+ struct inode *inode;
int error = -ENOSPC;
+ gid_t gid;
+ if (dir->i_mode & S_ISGID) {
+ gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else {
+ gid = current->fsgid;
+ }
+ inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, gid, mode, dev);
if (inode) {
dir->i_size += PSEUDO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -444,9 +452,15 @@
{
struct inode *inode;
int error = -ENOSPC;
+ gid_t gid;
+
+ if (dir->i_mode & S_ISGID)
+ gid = dir->i_gid;
+ else
+ gid = current->fsgid;
inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
- current->fsgid, S_IFLNK|S_IRWXUGO, 0);
+ gid, S_IFLNK|S_IRWXUGO, 0);
if (inode) {
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
diff -Nru a/fs/jbd/commit.c b/fs/jbd/commit.c
--- a/fs/jbd/commit.c Sat Oct 25 11:45:09 2003
+++ b/fs/jbd/commit.c Sat Oct 25 11:45:09 2003
@@ -172,6 +172,14 @@
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
JBUFFER_TRACE(jh, "reserved, unused: refile");
+ /*
+ * A journal_get_undo_access()+journal_release_buffer() may
+ * leave undo-committed data.
+ */
+ if (jh->b_committed_data) {
+ kfree(jh->b_committed_data);
+ jh->b_committed_data = NULL;
+ }
journal_refile_buffer(journal, jh);
}
diff -Nru a/fs/jbd/journal.c b/fs/jbd/journal.c
--- a/fs/jbd/journal.c Sat Oct 25 11:45:09 2003
+++ b/fs/jbd/journal.c Sat Oct 25 11:45:09 2003
@@ -342,7 +342,7 @@
tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
jbd_lock_bh_state(bh_in);
if (jh_in->b_frozen_data) {
- kfree(new_page);
+ kfree(tmp);
goto repeat;
}
@@ -1729,6 +1729,8 @@
J_ASSERT_BH(bh, buffer_jbd(bh));
J_ASSERT_BH(bh, jh2bh(jh) == bh);
BUFFER_TRACE(bh, "remove journal_head");
+ J_ASSERT_BH(bh, !jh->b_frozen_data);
+ J_ASSERT_BH(bh, !jh->b_committed_data);
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
clear_buffer_jbd(bh);
diff -Nru a/fs/libfs.c b/fs/libfs.c
--- a/fs/libfs.c Sat Oct 25 11:45:09 2003
+++ b/fs/libfs.c Sat Oct 25 11:45:09 2003
@@ -32,6 +32,8 @@
struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
+ if (dentry->d_name.len > NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
d_add(dentry, NULL);
return NULL;
}
diff -Nru a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
--- a/fs/nfsd/nfs3xdr.c Sat Oct 25 11:45:09 2003
+++ b/fs/nfsd/nfs3xdr.c Sat Oct 25 11:45:09 2003
@@ -825,6 +825,8 @@
dchild = lookup_one_len(name, dparent,namlen);
if (IS_ERR(dchild))
goto noexec;
+ if (d_mountpoint(dchild))
+ goto noexec;
if (fh_compose(&fh, exp, dchild, &cd->fh) != 0 || !dchild->d_inode)
goto noexec;
p = encode_post_op_attr(cd->rqstp, p, &fh);
diff -Nru a/fs/ramfs/inode.c b/fs/ramfs/inode.c
--- a/fs/ramfs/inode.c Sat Oct 25 11:45:09 2003
+++ b/fs/ramfs/inode.c Sat Oct 25 11:45:09 2003
@@ -95,6 +95,11 @@
int error = -ENOSPC;
if (inode) {
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ inode->i_mode |= S_ISGID;
+ }
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
error = 0;
@@ -125,6 +130,8 @@
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
if (!error) {
+ if (dir->i_mode & S_ISGID)
+ inode->i_gid = dir->i_gid;
d_instantiate(dentry, inode);
dget(dentry);
} else
diff -Nru a/fs/xfs/linux/xfs_ioctl.c b/fs/xfs/linux/xfs_ioctl.c
--- a/fs/xfs/linux/xfs_ioctl.c Sat Oct 25 11:45:09 2003
+++ b/fs/xfs/linux/xfs_ioctl.c Sat Oct 25 11:45:09 2003
@@ -226,8 +226,8 @@
xfs_inode_t *ip;
struct inode *inodep;
vnode_t *vpp;
+ xfs_ino_t ino;
__u32 igen;
- ino_t ino;
int error;
if (!capable(cap))
diff -Nru a/fs/xfs/linux/xfs_vnode.h b/fs/xfs/linux/xfs_vnode.h
--- a/fs/xfs/linux/xfs_vnode.h Sat Oct 25 11:45:10 2003
+++ b/fs/xfs/linux/xfs_vnode.h Sat Oct 25 11:45:10 2003
@@ -30,7 +30,7 @@
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*
* Portions Copyright (c) 1989, 1993
- * The Regents of the University of California. All rights reserved.
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,11 +40,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
diff -Nru a/fs/xfs/support/move.h b/fs/xfs/support/move.h
--- a/fs/xfs/support/move.h Sat Oct 25 11:45:09 2003
+++ b/fs/xfs/support/move.h Sat Oct 25 11:45:09 2003
@@ -30,7 +30,7 @@
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*
* Portions Copyright (c) 1982, 1986, 1993, 1994
- * The Regents of the University of California. All rights reserved.
+ * The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,11 +40,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
@@ -73,11 +69,11 @@
};
struct uio {
- struct iovec *uio_iov;
- int uio_iovcnt;
- xfs_off_t uio_offset;
- int uio_resid;
- enum uio_seg uio_segflg;
+ struct iovec *uio_iov; /* pointer to array of iovecs */
+ int uio_iovcnt; /* number of iovecs in array */
+ xfs_off_t uio_offset; /* offset in file this uio corresponds to */
+ int uio_resid; /* residual i/o count */
+ enum uio_seg uio_segflg; /* see above */
};
typedef struct uio uio_t;
diff -Nru a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
--- a/fs/xfs/xfs_dir2_node.c Sat Oct 25 11:45:09 2003
+++ b/fs/xfs/xfs_dir2_node.c Sat Oct 25 11:45:09 2003
@@ -1371,9 +1371,6 @@
xfs_dir2_db_t fbno; /* freespace block number */
xfs_dabuf_t *fbp; /* freespace buffer */
int findex; /* freespace entry index */
- xfs_dir2_db_t foundbno=0; /* found freespace block no */
- int foundindex=0; /* found freespace entry idx */
- int foundhole; /* found hole in freespace */
xfs_dir2_free_t *free=NULL; /* freespace block structure */
xfs_dir2_db_t ifbno; /* initial freespace block no */
xfs_dir2_db_t lastfbno=0; /* highest freespace block no */
@@ -1382,7 +1379,6 @@
xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log data header */
int needscan; /* need to rescan data frees */
- int needfreesp; /* need to allocate freesp blk */
xfs_dir2_data_off_t *tagp; /* data entry tag pointer */
xfs_trans_t *tp; /* transaction pointer */
@@ -1390,7 +1386,6 @@
mp = dp->i_mount;
tp = args->trans;
length = XFS_DIR2_DATA_ENTSIZE(args->namelen);
- foundhole = 0;
/*
* If we came in with a freespace block that means that lookup
* found an entry with our hash value. This is the freespace
@@ -1445,7 +1440,6 @@
return error;
lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo);
fbno = ifbno;
- foundindex = -1;
}
/*
* While we haven't identified a data block, search the freeblock
@@ -1485,7 +1479,6 @@
return error;
}
if (unlikely(fbp == NULL)) {
- foundhole = 1;
continue;
}
free = fbp->data;
@@ -1500,30 +1493,10 @@
dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex;
else {
/*
- * If we haven't found an empty entry yet, and this
- * one is empty, remember this slot.
- */
- if (foundindex == -1 &&
- INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF && !foundhole) {
- foundindex = findex;
- foundbno = fbno;
- }
- /*
* Are we done with the freeblock?
*/
if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) {
/*
- * If there is space left in this freeblock,
- * and we don't have an empty entry yet,
- * remember this slot.
- */
- if (foundindex == -1 &&
- findex < XFS_DIR2_MAX_FREE_BESTS(mp) &&
- !foundhole) {
- foundindex = findex;
- foundbno = fbno;
- }
- /*
* Drop the block.
*/
xfs_da_brelse(tp, fbp);
@@ -1553,9 +1526,10 @@
/*
* Allocate and initialize the new data block.
*/
- if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE,
- &dbno)) ||
- (error = xfs_dir2_data_init(args, dbno, &dbp))) {
+ if (unlikely((error = xfs_dir2_grow_inode(args,
+ XFS_DIR2_DATA_SPACE,
+ &dbno)) ||
+ (error = xfs_dir2_data_init(args, dbno, &dbp)))) {
/*
* Drop the freespace buffer unless it came from our
* caller.
@@ -1565,55 +1539,55 @@
return error;
}
/*
- * If the freespace entry for this data block is not in the
- * freespace block we have in hand, drop the one we have
- * and get the right one.
+ * If (somehow) we have a freespace block, get rid of it.
*/
- needfreesp = 0;
- if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno || fbp == NULL) {
- if (fbp)
- xfs_da_brelse(tp, fbp);
- if (fblk && fblk->bp)
- fblk->bp = NULL;
- fbno = XFS_DIR2_DB_TO_FDB(mp, dbno);
- if ((error = xfs_da_read_buf(tp, dp,
- XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp,
- XFS_DATA_FORK))) {
- xfs_da_buf_done(dbp);
- return error;
- }
-
- /*
- * If there wasn't a freespace block, the read will
- * return a NULL fbp. Allocate one later.
- */
-
- if(unlikely( fbp == NULL )) {
- needfreesp = 1;
- } else {
- free = fbp->data;
- ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
- }
- }
-
+ if (fbp)
+ xfs_da_brelse(tp, fbp);
+ if (fblk && fblk->bp)
+ fblk->bp = NULL;
+
+ /*
+ * Get the freespace block corresponding to the data block
+ * that was just allocated.
+ */
+ fbno = XFS_DIR2_DB_TO_FDB(mp, dbno);
+ if (unlikely(error = xfs_da_read_buf(tp, dp,
+ XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp,
+ XFS_DATA_FORK))) {
+ xfs_da_buf_done(dbp);
+ return error;
+ }
/*
- * If we don't have a data block, and there's no free slot in a
- * freeblock, we need to add a new freeblock.
+ * If there wasn't a freespace block, the read will
+ * return a NULL fbp. Allocate and initialize a new one.
*/
- if (unlikely(needfreesp || foundindex == -1)) {
- /*
- * Add the new freeblock.
- */
+ if( fbp == NULL ) {
if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_FREE_SPACE,
&fbno))) {
return error;
}
- if (XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno) {
+ if (unlikely(XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno)) {
cmn_err(CE_ALERT,
- "xfs_dir2_node_addname_int: needed block %lld, got %lld\n",
- (long long)XFS_DIR2_DB_TO_FDB(mp, dbno),
- (long long)fbno);
+ "xfs_dir2_node_addname_int: dir ino "
+ "%llu needed freesp block %lld for\n"
+ " data block %lld, got %lld\n"
+ " ifbno %llu lastfbno %d\n",
+ dp->i_ino,
+ XFS_DIR2_DB_TO_FDB(mp, dbno),
+ dbno, fbno,
+ ifbno, lastfbno);
+ if (fblk) {
+ cmn_err(CE_ALERT,
+ " fblk 0x%llu blkno %llu "
+ "index %d magic 0x%x\n",
+ fblk, fblk->blkno,
+ fblk->index,
+ fblk->magic);
+ } else {
+ cmn_err(CE_ALERT,
+ " ... fblk is NULL\n");
+ }
XFS_ERROR_REPORT("xfs_dir2_node_addname_int",
XFS_ERRLEVEL_LOW, mp);
return XFS_ERROR(EFSCORRUPTED);
@@ -1640,8 +1614,9 @@
XFS_DIR2_MAX_FREE_BESTS(mp));
INT_ZERO(free->hdr.nvalid, ARCH_CONVERT);
INT_ZERO(free->hdr.nused, ARCH_CONVERT);
- foundindex = 0;
- foundbno = fbno;
+ } else {
+ free = fbp->data;
+ ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC);
}
/*
diff -Nru a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
--- a/fs/xfs/xfs_ialloc_btree.c Sat Oct 25 11:45:09 2003
+++ b/fs/xfs/xfs_ialloc_btree.c Sat Oct 25 11:45:09 2003
@@ -30,7 +30,6 @@
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-
#include "xfs.h"
#include "xfs_macros.h"
@@ -858,7 +857,7 @@
xfs_agblock_t agbno; /* a.g. relative btree block number */
xfs_agnumber_t agno; /* allocation group number */
xfs_inobt_block_t *block=NULL; /* current btree block */
- int diff; /* difference for the current key */
+ __int64_t diff; /* difference for the current key */
int error; /* error return value */
int keyno=0; /* current key number */
int level; /* level in the btree */
@@ -883,7 +882,7 @@
*/
for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
xfs_buf_t *bp; /* buffer pointer for btree block */
- xfs_daddr_t d; /* disk address of btree block */
+ xfs_daddr_t d; /* disk address of btree block */
/*
* Get the disk address we're looking for.
@@ -977,7 +976,8 @@
/*
* Compute difference to get next direction.
*/
- diff = (int)startino - cur->bc_rec.i.ir_startino;
+ diff = (__int64_t)
+ startino - cur->bc_rec.i.ir_startino;
/*
* Less than, move right.
*/
diff -Nru a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
--- a/fs/xfs/xfs_log.c Sat Oct 25 11:45:10 2003
+++ b/fs/xfs/xfs_log.c Sat Oct 25 11:45:10 2003
@@ -2188,7 +2188,8 @@
* LOG_LOCK
* IOERROR - give up hope all ye who enter here
*/
- if (iclog->ic_state == XLOG_STATE_SYNCING ||
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_state == XLOG_STATE_SYNCING ||
iclog->ic_state == XLOG_STATE_DONE_SYNC ||
iclog->ic_state == XLOG_STATE_IOERROR )
break;
diff -Nru a/include/asm-arm/arch-shark/hardware.h b/include/asm-arm/arch-shark/hardware.h
--- a/include/asm-arm/arch-shark/hardware.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-arm/arch-shark/hardware.h Sat Oct 25 11:45:09 2003
@@ -51,6 +51,7 @@
#define PCIBIOS_MIN_IO 0x6000
#define PCIBIOS_MIN_MEM 0x50000000
+#define PCIMEM_BASE 0xe8000000
#endif
diff -Nru a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
--- a/include/asm-i386/hw_irq.h Sat Oct 25 11:45:10 2003
+++ b/include/asm-i386/hw_irq.h Sat Oct 25 11:45:10 2003
@@ -25,8 +25,8 @@
* Interrupt entry/exit code at both C and assembly level
*/
-extern int irq_vector[NR_IRQS];
-#define IO_APIC_VECTOR(irq) irq_vector[irq]
+extern u8 irq_vector[NR_IRQ_VECTORS];
+#define IO_APIC_VECTOR(irq) ((int)irq_vector[irq])
extern void (*interrupt[NR_IRQS])(void);
diff -Nru a/include/asm-i386/mach-default/irq_vectors.h b/include/asm-i386/mach-default/irq_vectors.h
--- a/include/asm-i386/mach-default/irq_vectors.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-i386/mach-default/irq_vectors.h Sat Oct 25 11:45:09 2003
@@ -78,8 +78,14 @@
*/
#ifdef CONFIG_X86_IO_APIC
#define NR_IRQS 224
+# if (224 >= 32 * NR_CPUS)
+# define NR_IRQ_VECTORS NR_IRQS
+# else
+# define NR_IRQ_VECTORS (32 * NR_CPUS)
+# endif
#else
#define NR_IRQS 16
+#define NR_IRQ_VECTORS NR_IRQS
#endif
#define FPU_IRQ 13
diff -Nru a/include/asm-i386/mach-pc9800/irq_vectors.h b/include/asm-i386/mach-pc9800/irq_vectors.h
--- a/include/asm-i386/mach-pc9800/irq_vectors.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-i386/mach-pc9800/irq_vectors.h Sat Oct 25 11:45:09 2003
@@ -18,6 +18,9 @@
* The total number of interrupt vectors (including all the
* architecture specific interrupts) needed.
*
+ * NR_IRQ_VECTORS:
+ * The total number of IO APIC vector inputs
+ *
*/
#ifndef _ASM_IRQ_VECTORS_H
#define _ASM_IRQ_VECTORS_H
@@ -81,6 +84,8 @@
#else
#define NR_IRQS 16
#endif
+
+#define NR_IRQ_VECTORS NR_IRQS
#define FPU_IRQ 8
diff -Nru a/include/asm-i386/mach-visws/irq_vectors.h b/include/asm-i386/mach-visws/irq_vectors.h
--- a/include/asm-i386/mach-visws/irq_vectors.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-i386/mach-visws/irq_vectors.h Sat Oct 25 11:45:09 2003
@@ -50,6 +50,7 @@
*
*/
#define NR_IRQS 224
+#define NR_IRQ_VECTORS NR_IRQS
#define FPU_IRQ 13
diff -Nru a/include/asm-i386/mach-voyager/irq_vectors.h b/include/asm-i386/mach-voyager/irq_vectors.h
--- a/include/asm-i386/mach-voyager/irq_vectors.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-i386/mach-voyager/irq_vectors.h Sat Oct 25 11:45:09 2003
@@ -56,6 +56,7 @@
#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
#define NR_IRQS 224
+#define NR_IRQ_VECTORS NR_IRQS
#define FPU_IRQ 13
diff -Nru a/include/asm-i386/pci.h b/include/asm-i386/pci.h
--- a/include/asm-i386/pci.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-i386/pci.h Sat Oct 25 11:45:09 2003
@@ -20,6 +20,8 @@
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM (pci_mem_start)
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
diff -Nru a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
--- a/include/asm-ia64/ia32.h Sat Oct 25 11:45:10 2003
+++ b/include/asm-ia64/ia32.h Sat Oct 25 11:45:10 2003
@@ -9,9 +9,11 @@
#ifdef CONFIG_IA32_SUPPORT
extern void ia32_cpu_init (void);
+extern void ia32_boot_gdt_init (void);
extern void ia32_gdt_init (void);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
+extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
#endif /* !CONFIG_IA32_SUPPORT */
diff -Nru a/include/asm-ia64/io.h b/include/asm-ia64/io.h
--- a/include/asm-ia64/io.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/io.h Sat Oct 25 11:45:09 2003
@@ -72,6 +72,9 @@
return (void *) (address + PAGE_OFFSET);
}
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
+
/*
* The following two macros are deprecated and scheduled for removal.
* Please use the PCI-DMA interface defined in instead.
diff -Nru a/include/asm-ia64/module.h b/include/asm-ia64/module.h
--- a/include/asm-ia64/module.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/module.h Sat Oct 25 11:45:09 2003
@@ -18,7 +18,8 @@
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
- void *unw_table; /* unwind-table cookie returned by unwinder */
+ void *core_unw_table; /* core unwind-table cookie returned by unwinder */
+ void *init_unw_table; /* init unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
diff -Nru a/include/asm-ia64/namei.h b/include/asm-ia64/namei.h
--- a/include/asm-ia64/namei.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/namei.h Sat Oct 25 11:45:09 2003
@@ -9,7 +9,7 @@
#include
#include
-#define EMUL_PREFIX_LINUX_IA32 "emul/ia32-linux/"
+#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/"
static inline char *
__emul_prefix (void)
diff -Nru a/include/asm-ia64/numnodes.h b/include/asm-ia64/numnodes.h
--- a/include/asm-ia64/numnodes.h Sat Oct 25 11:45:10 2003
+++ b/include/asm-ia64/numnodes.h Sat Oct 25 11:45:10 2003
@@ -4,7 +4,7 @@
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
-#elif defined(CONFIG_IA64_SGI_SN2)
+#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
/* Max 128 Nodes */
#define NODES_SHIFT 7
#endif
diff -Nru a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h
--- a/include/asm-ia64/perfmon_default_smpl.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/perfmon_default_smpl.h Sat Oct 25 11:45:09 2003
@@ -36,11 +36,12 @@
*/
typedef struct {
unsigned long hdr_count; /* how many valid entries */
- void *hdr_cur_pos; /* current position in the buffer */
- void *hdr_last_pos; /* first byte beyond buffer */
+ unsigned long hdr_cur_offs; /* current offset from top of buffer */
+ unsigned long hdr_reserved2; /* reserved for future use */
unsigned long hdr_overflows; /* how many times the buffer overflowed */
unsigned long hdr_buf_size; /* how many bytes in the buffer */
+
unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
unsigned int hdr_reserved1; /* for future use */
unsigned long hdr_reserved[10]; /* for future use */
diff -Nru a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
--- a/include/asm-ia64/processor.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/processor.h Sat Oct 25 11:45:09 2003
@@ -230,6 +230,22 @@
(int *) (addr)); \
})
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+ unsigned int a, b;
+};
+
+#define desc_empty(desc) (!((desc)->a + (desc)->b))
+#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define GDT_ENTRY_TLS_MIN 6
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+#endif
+
struct thread_struct {
__u32 flags; /* various thread flags (see IA64_THREAD_*) */
/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
@@ -249,6 +265,9 @@
__u64 fdr; /* IA32 fp except. data reg */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
+ /* cached TLS descriptors. */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
# define INIT_THREAD_IA32 .eflag = 0, \
.fsr = 0, \
.fcr = 0x17800000037fULL, \
diff -Nru a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h
--- a/include/asm-ia64/unwind.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-ia64/unwind.h Sat Oct 25 11:45:09 2003
@@ -93,6 +93,12 @@
* The official API follows below:
*/
+struct unw_table_entry {
+ u64 start_offset;
+ u64 end_offset;
+ u64 info_offset;
+};
+
/*
* Initialize unwind support.
*/
diff -Nru a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
--- a/include/asm-sparc64/page.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-sparc64/page.h Sat Oct 25 11:45:09 2003
@@ -90,7 +90,13 @@
#endif /* (STRICT_MM_TYPECHECKS) */
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT 22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT 19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT 16
+#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
diff -Nru a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
--- a/include/asm-sparc64/pgtable.h Sat Oct 25 11:45:09 2003
+++ b/include/asm-sparc64/pgtable.h Sat Oct 25 11:45:09 2003
@@ -12,6 +12,7 @@
* the SpitFire page tables.
*/
+#include
#include
#include
#include
@@ -136,9 +137,17 @@
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS _PAGE_SZ512K
#elif PAGE_SHIFT == 22
-#define _PAGE_SZBITS _PAGE_SZ4M
+#define _PAGE_SZBITS _PAGE_SZ4MB
#else
#error Wrong PAGE_SHIFT specified
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE _PAGE_SZ4MB
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE _PAGE_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE _PAGE_64K
#endif
#define _PAGE_CACHE (_PAGE_CP | _PAGE_CV)
diff -Nru a/include/linux/ata.h b/include/linux/ata.h
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/include/linux/ata.h Sat Oct 25 11:45:10 2003
@@ -0,0 +1,179 @@
+
+/*
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#ifndef __LINUX_ATA_H__
+#define __LINUX_ATA_H__
+
+/* defines only for the constants which don't work well as enums */
+#define ATA_DMA_BOUNDARY 0xffffUL
+#define ATA_DMA_MASK 0xffffffffULL
+
+enum {
+ /* various global constants */
+ ATA_MAX_DEVICES = 2, /* per bus/port */
+ ATA_MAX_PRD = 256, /* we could make these 256/256 */
+ ATA_SECT_SIZE = 512,
+ ATA_SECT_SIZE_MASK = (ATA_SECT_SIZE - 1),
+ ATA_SECT_DWORDS = ATA_SECT_SIZE / sizeof(u32),
+
+ ATA_ID_WORDS = 256,
+ ATA_ID_PROD_OFS = 27,
+ ATA_ID_SERNO_OFS = 10,
+ ATA_ID_MAJOR_VER = 80,
+ ATA_ID_PIO_MODES = 64,
+ ATA_ID_UDMA_MODES = 88,
+ ATA_ID_PIO4 = (1 << 1),
+
+ ATA_PCI_CTL_OFS = 2,
+ ATA_SERNO_LEN = 20,
+ ATA_UDMA0 = (1 << 0),
+ ATA_UDMA1 = ATA_UDMA0 | (1 << 1),
+ ATA_UDMA2 = ATA_UDMA1 | (1 << 2),
+ ATA_UDMA3 = ATA_UDMA2 | (1 << 3),
+ ATA_UDMA4 = ATA_UDMA3 | (1 << 4),
+ ATA_UDMA5 = ATA_UDMA4 | (1 << 5),
+ ATA_UDMA6 = ATA_UDMA5 | (1 << 6),
+ ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
+ /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
+
+ ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
+
+ /* DMA-related */
+ ATA_PRD_SZ = 8,
+ ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ),
+ ATA_PRD_EOT = (1 << 31), /* end-of-table flag */
+
+ ATA_DMA_TABLE_OFS = 4,
+ ATA_DMA_STATUS = 2,
+ ATA_DMA_CMD = 0,
+ ATA_DMA_WR = (1 << 3),
+ ATA_DMA_START = (1 << 0),
+ ATA_DMA_INTR = (1 << 2),
+ ATA_DMA_ERR = (1 << 1),
+ ATA_DMA_ACTIVE = (1 << 0),
+
+ /* bits in ATA command block registers */
+ ATA_HOB = (1 << 7), /* LBA48 selector */
+ ATA_NIEN = (1 << 1), /* disable-irq flag */
+ ATA_LBA = (1 << 6), /* LBA28 selector */
+ ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */
+ ATA_BUSY = (1 << 7), /* BSY status bit */
+ ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */
+ ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */
+ ATA_DRQ = (1 << 3), /* data request i/o */
+ ATA_ERR = (1 << 0), /* have an error */
+ ATA_SRST = (1 << 2), /* software reset */
+ ATA_ABORTED = (1 << 2), /* command aborted */
+
+ /* ATA command block registers */
+ ATA_REG_DATA = 0x00,
+ ATA_REG_ERR = 0x01,
+ ATA_REG_NSECT = 0x02,
+ ATA_REG_LBAL = 0x03,
+ ATA_REG_LBAM = 0x04,
+ ATA_REG_LBAH = 0x05,
+ ATA_REG_DEVICE = 0x06,
+ ATA_REG_STATUS = 0x07,
+
+ ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */
+ ATA_REG_CMD = ATA_REG_STATUS,
+ ATA_REG_BYTEL = ATA_REG_LBAM,
+ ATA_REG_BYTEH = ATA_REG_LBAH,
+ ATA_REG_DEVSEL = ATA_REG_DEVICE,
+ ATA_REG_IRQ = ATA_REG_NSECT,
+
+ /* ATA taskfile protocols */
+ ATA_PROT_UNKNOWN = 0,
+ ATA_PROT_NODATA = 1,
+ ATA_PROT_PIO_READ = 2,
+ ATA_PROT_PIO_WRITE = 3,
+ ATA_PROT_DMA_READ = 4,
+ ATA_PROT_DMA_WRITE = 5,
+ ATA_PROT_ATAPI = 6,
+ ATA_PROT_ATAPI_DMA = 7,
+
+ /* ATA device commands */
+ ATA_CMD_EDD = 0x90, /* execute device diagnostic */
+ ATA_CMD_ID_ATA = 0xEC,
+ ATA_CMD_ID_ATAPI = 0xA1,
+ ATA_CMD_READ = 0xC8,
+ ATA_CMD_READ_EXT = 0x25,
+ ATA_CMD_WRITE = 0xCA,
+ ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_PIO_READ = 0x20,
+ ATA_CMD_PIO_READ_EXT = 0x24,
+ ATA_CMD_PIO_WRITE = 0x30,
+ ATA_CMD_PIO_WRITE_EXT = 0x34,
+ ATA_CMD_SET_FEATURES = 0xEF,
+ ATA_CMD_PACKET = 0xA0,
+
+ /* SETFEATURES stuff */
+ SETFEATURES_XFER = 0x03,
+ XFER_UDMA_7 = 0x47,
+ XFER_UDMA_6 = 0x46,
+ XFER_UDMA_5 = 0x45,
+ XFER_UDMA_4 = 0x44,
+ XFER_UDMA_3 = 0x43,
+ XFER_UDMA_2 = 0x42,
+ XFER_UDMA_1 = 0x41,
+ XFER_UDMA_0 = 0x40,
+ XFER_PIO_4 = 0x0C,
+ XFER_PIO_3 = 0x0B,
+
+ /* ATAPI stuff */
+ ATAPI_PKT_DMA = (1 << 0),
+
+ /* cable types */
+ ATA_CBL_NONE = 0,
+ ATA_CBL_PATA40 = 1,
+ ATA_CBL_PATA80 = 2,
+ ATA_CBL_PATA_UNK = 3,
+ ATA_CBL_SATA = 4,
+
+ /* SATA Status and Control Registers */
+ SCR_STATUS = 0,
+ SCR_ERROR = 1,
+ SCR_CONTROL = 2,
+ SCR_ACTIVE = 3,
+ SCR_NOTIFICATION = 4,
+};
+
+/* core structures */
+struct ata_prd {
+ u32 addr;
+ u32 flags_len;
+} __attribute__((packed));
+
+#define ata_id_is_ata(dev) (((dev)->id[0] & (1 << 15)) == 0)
+#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10))
+#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8))
+#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9))
+#define ata_id_u32(dev,n) \
+ (((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)]))
+#define ata_id_u64(dev,n) \
+ ( ((u64) dev->id[(n) + 3] << 48) | \
+ ((u64) dev->id[(n) + 2] << 32) | \
+ ((u64) dev->id[(n) + 1] << 16) | \
+ ((u64) dev->id[(n) + 0]) )
+
+#endif /* __LINUX_ATA_H__ */
diff -Nru a/include/linux/etherdevice.h b/include/linux/etherdevice.h
--- a/include/linux/etherdevice.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/etherdevice.h Sat Oct 25 11:45:09 2003
@@ -39,7 +39,7 @@
extern int eth_header_parse(struct sk_buff *skb,
unsigned char *haddr);
extern struct net_device *__init_etherdev(struct net_device *dev, int sizeof_priv);
-static inline __deprecated struct net_device *init_etherdev(struct net_device *dev,
+static inline struct net_device *init_etherdev(struct net_device *dev,
int sizeof_priv)
{
return __init_etherdev(dev, sizeof_priv);
diff -Nru a/include/linux/ioport.h b/include/linux/ioport.h
--- a/include/linux/ioport.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/ioport.h Sat Oct 25 11:45:09 2003
@@ -89,6 +89,7 @@
extern int get_resource_list(struct resource *, char *buf, int size);
extern int request_resource(struct resource *root, struct resource *new);
+extern struct resource * ____request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
diff -Nru a/include/linux/kernel.h b/include/linux/kernel.h
--- a/include/linux/kernel.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/kernel.h Sat Oct 25 11:45:09 2003
@@ -101,7 +101,7 @@
extern void bust_spinlocks(int yes);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_on_oops;
-
+extern int system_running;
extern int tainted;
extern const char *print_tainted(void);
#define TAINT_PROPRIETARY_MODULE (1<<0)
diff -Nru a/include/linux/libata.h b/include/linux/libata.h
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/include/linux/libata.h Sat Oct 25 11:45:10 2003
@@ -0,0 +1,567 @@
+/*
+ Copyright 2003 Red Hat, Inc. All rights reserved.
+ Copyright 2003 Jeff Garzik
+
+ The contents of this file are subject to the Open
+ Software License version 1.1 that can be found at
+ http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ by reference.
+
+ Alternatively, the contents of this file may be used under the terms
+ of the GNU General Public License version 2 (the "GPL") as distributed
+ in the kernel source COPYING file, in which case the provisions of
+ the GPL are applicable instead of the above. If you wish to allow
+ the use of your version of this file only under the terms of the
+ GPL and not to allow others to use your version of this file under
+ the OSL, indicate your decision by deleting the provisions above and
+ replace them with the notice and other provisions required by the GPL.
+ If you do not delete the provisions above, a recipient may use your
+ version of this file under either the OSL or the GPL.
+
+ */
+
+#ifndef __LINUX_LIBATA_H__
+#define __LINUX_LIBATA_H__
+
+#include
+#include
+#include
+#include
+
+/*
+ * compile-time options
+ */
+#undef ATA_FORCE_PIO /* do not configure or use DMA */
+#undef ATA_DEBUG /* debugging output */
+#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
+#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
+#undef ATA_NDEBUG /* define to disable quick runtime checks */
+#undef ATA_ENABLE_ATAPI /* define to enable ATAPI support */
+#undef ATA_ENABLE_PATA /* define to enable PATA support in some
+ * low-level drivers */
+
+
+/* note: prints function name for you */
+#ifdef ATA_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#ifdef ATA_VERBOSE_DEBUG
+#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#else
+#define VPRINTK(fmt, args...)
+#endif /* ATA_VERBOSE_DEBUG */
+#else
+#define DPRINTK(fmt, args...)
+#define VPRINTK(fmt, args...)
+#endif /* ATA_DEBUG */
+
+#ifdef ATA_NDEBUG
+#define assert(expr)
+#else
+#define assert(expr) \
+ if(unlikely(!(expr))) { \
+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__FUNCTION__,__LINE__); \
+ }
+#endif
+
+/* defines only for the constants which don't work well as enums */
+#define ATA_TAG_POISON 0xfafbfcfdU
+#define ATA_DMA_BOUNDARY 0xffffUL
+#define ATA_DMA_MASK 0xffffffffULL
+
+enum {
+ /* various global constants */
+ ATA_MAX_PORTS = 8,
+ ATA_DEF_QUEUE = 1,
+ ATA_MAX_QUEUE = 1,
+ ATA_MAX_SECTORS = 200, /* FIXME */
+ ATA_MAX_BUS = 2,
+ ATA_DEF_BUSY_WAIT = 10000,
+ ATA_SHORT_PAUSE = (HZ >> 6) + 1,
+
+ ATA_SHT_EMULATED = 1,
+ ATA_SHT_CMD_PER_LUN = 1,
+ ATA_SHT_THIS_ID = -1,
+ ATA_SHT_USE_CLUSTERING = 1,
+
+ /* struct ata_device stuff */
+ ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
+ ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
+ ATA_DFLAG_MASTER = (1 << 2), /* is device 0? */
+ ATA_DFLAG_WCACHE = (1 << 3), /* has write cache we can
+ * (hopefully) flush? */
+
+ ATA_DEV_UNKNOWN = 0, /* unknown device */
+ ATA_DEV_ATA = 1, /* ATA device */
+ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
+ ATA_DEV_ATAPI = 3, /* ATAPI device */
+ ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
+ ATA_DEV_NONE = 5, /* no device */
+
+ /* struct ata_port flags */
+ ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */
+ /* (doesn't imply presence) */
+ ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
+ ATA_FLAG_SATA = (1 << 3),
+ ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
+ ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */
+ ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
+ ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
+
+ /* struct ata_taskfile flags */
+ ATA_TFLAG_LBA48 = (1 << 0),
+ ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
+ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
+
+ ATA_QCFLAG_WRITE = (1 << 0), /* read==0, write==1 */
+ ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
+ ATA_QCFLAG_DMA = (1 << 2), /* data delivered via DMA */
+ ATA_QCFLAG_ATAPI = (1 << 3), /* is ATAPI packet command? */
+ ATA_QCFLAG_SG = (1 << 4), /* have s/g table? */
+ ATA_QCFLAG_POLL = (1 << 5), /* polling, no interrupts */
+
+ /* struct ata_engine atomic flags (use test_bit, etc.) */
+ ATA_EFLG_ACTIVE = 0, /* engine is active */
+
+ /* various lengths of time */
+ ATA_TMOUT_EDD = 5 * HZ, /* heuristic */
+ ATA_TMOUT_PIO = 30 * HZ,
+ ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
+ ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
+ ATA_TMOUT_CDB = 30 * HZ,
+ ATA_TMOUT_CDB_QUICK = 5 * HZ,
+
+ /* ATA bus states */
+ BUS_UNKNOWN = 0,
+ BUS_DMA = 1,
+ BUS_IDLE = 2,
+ BUS_NOINTR = 3,
+ BUS_NODATA = 4,
+ BUS_TIMER = 5,
+ BUS_PIO = 6,
+ BUS_EDD = 7,
+ BUS_IDENTIFY = 8,
+ BUS_PACKET = 9,
+
+ /* thread states */
+ THR_UNKNOWN = 0,
+ THR_PORT_RESET = (THR_UNKNOWN + 1),
+ THR_AWAIT_DEATH = (THR_PORT_RESET + 1),
+ THR_PROBE_FAILED = (THR_AWAIT_DEATH + 1),
+ THR_IDLE = (THR_PROBE_FAILED + 1),
+ THR_PROBE_SUCCESS = (THR_IDLE + 1),
+ THR_PROBE_START = (THR_PROBE_SUCCESS + 1),
+ THR_PIO_POLL = (THR_PROBE_START + 1),
+ THR_PIO_TMOUT = (THR_PIO_POLL + 1),
+ THR_PIO = (THR_PIO_TMOUT + 1),
+ THR_PIO_LAST = (THR_PIO + 1),
+ THR_PIO_LAST_POLL = (THR_PIO_LAST + 1),
+ THR_PIO_ERR = (THR_PIO_LAST_POLL + 1),
+ THR_PACKET = (THR_PIO_ERR + 1),
+
+ /* SATA port states */
+ PORT_UNKNOWN = 0,
+ PORT_ENABLED = 1,
+ PORT_DISABLED = 2,
+
+ /* ata_qc_cb_t flags - note uses above ATA_QCFLAG_xxx namespace,
+ * but not numberspace
+ */
+ ATA_QCFLAG_TIMEOUT = (1 << 0),
+};
+
+/* forward declarations */
+struct ata_port_operations;
+struct ata_port;
+struct ata_queued_cmd;
+
+/* typedefs */
+typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc, unsigned int flags);
+
+struct ata_ioports {
+ unsigned long cmd_addr;
+ unsigned long data_addr;
+ unsigned long error_addr;
+ unsigned long nsect_addr;
+ unsigned long lbal_addr;
+ unsigned long lbam_addr;
+ unsigned long lbah_addr;
+ unsigned long device_addr;
+ unsigned long cmdstat_addr;
+ unsigned long ctl_addr;
+ unsigned long bmdma_addr;
+ unsigned long scr_addr;
+};
+
+struct ata_probe_ent {
+ struct list_head node;
+ struct pci_dev *pdev;
+ struct ata_port_operations *port_ops;
+ Scsi_Host_Template *sht;
+ struct ata_ioports port[ATA_MAX_PORTS];
+ unsigned int n_ports;
+ unsigned int pio_mask;
+ unsigned int udma_mask;
+ unsigned int legacy_mode;
+ unsigned long irq;
+ unsigned int irq_flags;
+ unsigned long host_flags;
+ void *mmio_base;
+};
+
+struct ata_host_set {
+ spinlock_t lock;
+ struct pci_dev *pdev;
+ unsigned long irq;
+ void *mmio_base;
+ unsigned int n_ports;
+ struct ata_port * ports[0];
+};
+
+struct ata_taskfile {
+ unsigned long flags; /* ATA_TFLAG_xxx */
+ u8 protocol; /* ATA_PROT_xxx */
+
+ u8 ctl; /* control reg */
+
+ u8 hob_feature; /* additional data */
+ u8 hob_nsect; /* to support LBA48 */
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+
+ u8 feature;
+ u8 nsect;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+
+ u8 device;
+
+ u8 command; /* IO operation */
+};
+
+struct ata_queued_cmd {
+ struct ata_port *ap;
+ struct ata_device *dev;
+
+ Scsi_Cmnd *scsicmd;
+ void (*scsidone)(Scsi_Cmnd *);
+
+ struct list_head node;
+ unsigned long flags; /* ATA_QCFLAG_xxx */
+ unsigned int tag;
+ unsigned int n_elem;
+ unsigned int nsect;
+ unsigned int cursect;
+ unsigned int cursg;
+ unsigned int cursg_ofs;
+ struct ata_taskfile tf;
+ struct scatterlist sgent;
+
+ struct scatterlist *sg;
+
+ ata_qc_cb_t callback;
+
+ struct semaphore sem;
+};
+
+struct ata_host_stats {
+ unsigned long unhandled_irq;
+ unsigned long idle_irq;
+ unsigned long rw_reqbuf;
+};
+
+struct ata_device {
+ u64 n_sectors; /* size of device, if ATA */
+ unsigned long flags; /* ATA_DFLAG_xxx */
+ unsigned int class; /* ATA_DEV_xxx */
+ unsigned int devno; /* 0 or 1 */
+ u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+ unsigned int pio_mode;
+ unsigned int udma_mode;
+
+ unsigned char vendor[8]; /* space-padded, not ASCIIZ */
+ unsigned char product[32]; /* WARNING: shorter than
+ * ATAPI7 spec size, 40 ASCII
+ * characters
+ */
+};
+
+struct ata_engine {
+ unsigned long flags;
+ struct list_head q;
+};
+
+struct ata_port {
+ struct Scsi_Host *host; /* our co-allocated scsi host */
+ struct ata_port_operations *ops;
+ unsigned long flags; /* ATA_FLAG_xxx */
+ unsigned int id; /* unique id req'd by scsi midlyr */
+ unsigned int port_no; /* unique port #; from zero */
+
+ struct ata_prd *prd; /* our SG list */
+ dma_addr_t prd_dma; /* and its DMA mapping */
+
+ struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
+
+ u8 ctl; /* cache of ATA control register */
+ unsigned int bus_state;
+ unsigned int port_state;
+ unsigned int pio_mask;
+ unsigned int udma_mask;
+ unsigned int cbl; /* cable type; ATA_CBL_xxx */
+
+ struct ata_engine eng;
+
+ struct ata_device device[ATA_MAX_DEVICES];
+
+ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
+ unsigned long qactive;
+ unsigned int active_tag;
+
+ struct ata_host_stats stats;
+ struct ata_host_set *host_set;
+
+ struct semaphore sem;
+ struct semaphore probe_sem;
+
+ unsigned int thr_state;
+ int time_to_die;
+ pid_t thr_pid;
+ struct completion thr_exited;
+ struct semaphore thr_sem;
+ struct timer_list thr_timer;
+ unsigned long thr_timeout;
+};
+
+struct ata_port_operations {
+ void (*port_disable) (struct ata_port *);
+
+ void (*dev_config) (struct ata_port *, struct ata_device *);
+
+ void (*set_piomode) (struct ata_port *, struct ata_device *,
+ unsigned int);
+ void (*set_udmamode) (struct ata_port *, struct ata_device *,
+ unsigned int);
+
+ void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
+ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
+
+ void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
+ u8 (*check_status)(struct ata_port *ap);
+
+ void (*phy_reset) (struct ata_port *ap);
+ void (*phy_config) (struct ata_port *ap);
+
+ void (*bmdma_start) (struct ata_queued_cmd *qc);
+ void (*fill_sg) (struct ata_queued_cmd *qc);
+ void (*eng_timeout) (struct ata_port *ap);
+
+ irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
+
+ u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
+ void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
+ u32 val);
+};
+
+struct ata_port_info {
+ Scsi_Host_Template *sht;
+ unsigned long host_flags;
+ unsigned long pio_mask;
+ unsigned long udma_mask;
+ struct ata_port_operations *port_ops;
+};
+
+struct pci_bits {
+ unsigned int reg; /* PCI config register to read */
+ unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
+ unsigned long mask;
+ unsigned long val;
+};
+
+extern void ata_port_probe(struct ata_port *);
+extern void pata_phy_config(struct ata_port *ap);
+extern void sata_phy_reset(struct ata_port *ap);
+extern void ata_bus_reset(struct ata_port *ap);
+extern void ata_port_disable(struct ata_port *);
+extern void ata_std_ports(struct ata_ioports *ioaddr);
+extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+ unsigned int n_ports);
+extern void ata_pci_remove_one (struct pci_dev *pdev);
+extern int ata_device_add(struct ata_probe_ent *ent);
+extern int ata_scsi_detect(Scsi_Host_Template *sht);
+extern int ata_scsi_queuecmd(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+extern int ata_scsi_error(struct Scsi_Host *host);
+extern int ata_scsi_release(struct Scsi_Host *host);
+extern int ata_scsi_slave_config(struct scsi_device *sdev);
+/*
+ * Default driver ops implementations
+ */
+extern void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+extern u8 ata_check_status_pio(struct ata_port *ap);
+extern u8 ata_check_status_mmio(struct ata_port *ap);
+extern void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
+extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+extern void ata_fill_sg(struct ata_queued_cmd *qc);
+extern void ata_bmdma_start_mmio (struct ata_queued_cmd *qc);
+extern void ata_bmdma_start_pio (struct ata_queued_cmd *qc);
+extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
+extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late);
+extern void ata_eng_timeout(struct ata_port *ap);
+
+
+static inline unsigned long msecs_to_jiffies(unsigned long msecs)
+{
+ return ((HZ * msecs + 999) / 1000);
+}
+
+static inline unsigned int ata_tag_valid(unsigned int tag)
+{
+ return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+}
+
+static inline unsigned int ata_dev_present(struct ata_device *dev)
+{
+ return ((dev->class == ATA_DEV_ATA) ||
+ (dev->class == ATA_DEV_ATAPI));
+}
+
+static inline u8 ata_chk_err(struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_MMIO) {
+ return readb((void *) ap->ioaddr.error_addr);
+ }
+ return inb(ap->ioaddr.error_addr);
+}
+
+static inline u8 ata_chk_status(struct ata_port *ap)
+{
+ return ap->ops->check_status(ap);
+}
+
+static inline u8 ata_altstatus(struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_MMIO)
+ return readb(ap->ioaddr.ctl_addr);
+ return inb(ap->ioaddr.ctl_addr);
+}
+
+static inline void ata_pause(struct ata_port *ap)
+{
+ ata_altstatus(ap);
+ ndelay(400);
+}
+
+static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
+ unsigned int max)
+{
+ u8 status;
+
+ do {
+ udelay(10);
+ status = ata_chk_status(ap);
+ max--;
+ } while ((status & bits) && (max > 0));
+
+ return status;
+}
+
+static inline u8 ata_wait_idle(struct ata_port *ap)
+{
+ u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ unsigned long l = ap->ioaddr.cmdstat_addr;
+ printk(KERN_WARNING
+ "ATA: abnormal status 0x%X on port 0x%lX\n",
+ status, l);
+ }
+
+ return status;
+}
+
+static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
+ unsigned int tag)
+{
+ if (likely(ata_tag_valid(tag)))
+ return &ap->qcmd[tag];
+ return NULL;
+}
+
+static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
+{
+ memset(tf, 0, sizeof(*tf));
+
+ tf->ctl = ap->ctl;
+ if (device == 0)
+ tf->device = ATA_DEVICE_OBS;
+ else
+ tf->device = ATA_DEVICE_OBS | ATA_DEV1;
+}
+
+static inline u8 ata_irq_on(struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ap->ctl &= ~ATA_NIEN;
+
+ if (ap->flags & ATA_FLAG_MMIO)
+ writeb(ap->ctl, ioaddr->ctl_addr);
+ else
+ outb(ap->ctl, ioaddr->ctl_addr);
+
+ return ata_wait_idle(ap);
+}
+
+static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
+{
+ unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
+ u8 host_stat, post_stat, status;
+
+ status = ata_busy_wait(ap, bits, 1000);
+ if (status & bits)
+ DPRINTK("abnormal status 0x%X\n", status);
+
+ /* get controller status; clear intr, err bits */
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void *mmio = (void *) ap->ioaddr.bmdma_addr;
+ host_stat = readb(mmio + ATA_DMA_STATUS);
+ writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ mmio + ATA_DMA_STATUS);
+
+ post_stat = readb(mmio + ATA_DMA_STATUS);
+ } else {
+ host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+ post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ }
+
+ VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
+ host_stat, post_stat, status);
+
+ return status;
+}
+
+static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
+{
+ return ap->ops->scr_read(ap, reg);
+}
+
+static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
+{
+ ap->ops->scr_write(ap, reg, val);
+}
+
+static inline unsigned int sata_dev_present(struct ata_port *ap)
+{
+ return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
+}
+
+#endif /* __LINUX_LIBATA_H__ */
diff -Nru a/include/linux/netdevice.h b/include/linux/netdevice.h
--- a/include/linux/netdevice.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/netdevice.h Sat Oct 25 11:45:09 2003
@@ -906,6 +906,9 @@
extern void dev_clear_fastroute(struct net_device *dev);
#endif
+#ifdef CONFIG_SYSCTL
+extern char *net_sysctl_strdup(const char *s);
+#endif
#endif /* __KERNEL__ */
diff -Nru a/include/linux/timex.h b/include/linux/timex.h
--- a/include/linux/timex.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/timex.h Sat Oct 25 11:45:09 2003
@@ -302,6 +302,7 @@
extern long time_reftime; /* time at last adjustment (s) */
extern long time_adjust; /* The amount of adjtime left */
+extern long time_next_adjust; /* Value for time_adjust at next tick */
/* interface variables pps->timer interrupt */
extern long pps_offset; /* pps time offset (us) */
diff -Nru a/include/linux/toshiba.h b/include/linux/toshiba.h
--- a/include/linux/toshiba.h Sat Oct 25 11:45:09 2003
+++ b/include/linux/toshiba.h Sat Oct 25 11:45:09 2003
@@ -33,13 +33,4 @@
unsigned int edi __attribute__ ((packed));
} SMMRegisters;
-#ifdef CONFIG_PROC_FS
-static int tosh_get_info(char *, char **, off_t, int);
-#else /* !CONFIG_PROC_FS */
-inline int tosh_get_info(char *buffer, char **start, off_t fpos, int lenght)
-{
- return 0;
-}
-#endif /* CONFIG_PROC_FS */
-
#endif
diff -Nru a/include/net/llc.h b/include/net/llc.h
--- a/include/net/llc.h Sat Oct 25 11:45:09 2003
+++ b/include/net/llc.h Sat Oct 25 11:45:09 2003
@@ -88,4 +88,12 @@
extern int llc_station_init(void);
extern void llc_station_exit(void);
+
+#ifdef CONFIG_PROC_FS
+extern int llc_proc_init(void);
+extern void llc_proc_exit(void);
+#else
+#define llc_proc_init() (0)
+#define llc_proc_exit() do { } while(0)
+#endif /* CONFIG_PROC_FS */
#endif /* LLC_H */
diff -Nru a/include/net/llc_proc.h b/include/net/llc_proc.h
--- a/include/net/llc_proc.h Sat Oct 25 11:45:09 2003
+++ /dev/null Wed Dec 31 16:00:00 1969
@@ -1,18 +0,0 @@
-#ifndef LLC_PROC_H
-#define LLC_PROC_H
-/*
- * Copyright (c) 1997 by Procom Technology, Inc.
- * 2002 by Arnaldo Carvalho de Melo
- *
- * This program can be redistributed or modified under the terms of the
- * GNU General Public License as published by the Free Software Foundation.
- * This program is distributed without any warranty or implied warranty
- * of merchantability or fitness for a particular purpose.
- *
- * See the GNU General Public License for more details.
- */
-
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
-
-#endif /* LLC_PROC_H */
diff -Nru a/include/net/xfrm.h b/include/net/xfrm.h
--- a/include/net/xfrm.h Sat Oct 25 11:45:09 2003
+++ b/include/net/xfrm.h Sat Oct 25 11:45:09 2003
@@ -562,6 +562,15 @@
extern struct sec_path *secpath_dup(struct sec_path *src);
+static inline void
+secpath_reset(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ secpath_put(skb->sp);
+ skb->sp = NULL;
+#endif
+}
+
static inline int
__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
{
diff -Nru a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
--- a/include/scsi/scsi_device.h Sat Oct 25 11:45:09 2003
+++ b/include/scsi/scsi_device.h Sat Oct 25 11:45:09 2003
@@ -86,6 +86,8 @@
* because we did a bus reset. */
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
+ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned int device_blocked; /* Device returned QUEUE_FULL. */
diff -Nru a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
--- /dev/null Wed Dec 31 16:00:00 1969
+++ b/include/scsi/scsi_devinfo.h Sat Oct 25 11:45:09 2003
@@ -0,0 +1,22 @@
+#ifndef _SCSI_SCSI_DEVINFO_H
+#define _SCSI_SCSI_DEVINFO_H
+/*
+ * Flags for SCSI devices that need special treatment
+ */
+#define BLIST_NOLUN 0x001 /* Only scan LUN 0 */
+#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning */
+#define BLIST_BORKEN 0x004 /* Flag for broken handshaking */
+#define BLIST_KEY 0x008 /* unlock by special command */
+#define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */
+#define BLIST_NOTQ 0x020 /* Buggy Tagged Command Queuing */
+#define BLIST_SPARSELUN 0x040 /* Non consecutive LUN numbering */
+#define BLIST_MAX5LUN 0x080 /* Avoid LUNS >= 5 */
+#define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */
+#define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */
+#define BLIST_INQUIRY_36 0x400 /* override additional length field */
+#define BLIST_INQUIRY_58 0x800 /* ... for broken inquiry responses */
+#define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */
+#define BLIST_MS_SKIP_PAGE_08 0x2000 /* do not send ms page 0x08 */
+#define BLIST_MS_SKIP_PAGE_3F 0x4000 /* do not send ms page 0x3f */
+#define BLIST_USE_10_BYTE_MS 0x8000 /* use 10 byte ms before 6 byte ms */
+#endif
diff -Nru a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
--- a/include/scsi/scsi_host.h Sat Oct 25 11:45:09 2003
+++ b/include/scsi/scsi_host.h Sat Oct 25 11:45:09 2003
@@ -344,6 +344,12 @@
* module_init/module_exit.
*/
struct list_head legacy_hosts;
+
+ /*
+ * Default flags settings, these modify the setting of scsi_device
+ * bits.
+ */
+ unsigned int flags;
};
/*
diff -Nru a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
--- a/init/do_mounts_initrd.c Sat Oct 25 11:45:09 2003
+++ b/init/do_mounts_initrd.c Sat Oct 25 11:45:09 2003
@@ -109,12 +109,12 @@
* in that case the ram disk is just set up here, and gets
* mounted in the normal path.
*/
- if (rd_load_image("/dev/initrd") && ROOT_DEV != Root_RAM0) {
- sys_unlink("/dev/initrd");
+ if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
+ sys_unlink("/initrd.image");
handle_initrd();
return 1;
}
}
- sys_unlink("/dev/initrd");
+ sys_unlink("/initrd.image");
return 0;
}
diff -Nru a/init/do_mounts_rd.c b/init/do_mounts_rd.c
--- a/init/do_mounts_rd.c Sat Oct 25 11:45:09 2003
+++ b/init/do_mounts_rd.c Sat Oct 25 11:45:09 2003
@@ -185,7 +185,7 @@
else
devblocks >>= 1;
- if (strcmp(from, "/dev/initrd") == 0)
+ if (strcmp(from, "/initrd.image") == 0)
devblocks = nblocks;
if (devblocks == 0) {
diff -Nru a/init/initramfs.c b/init/initramfs.c
--- a/init/initramfs.c Sat Oct 25 11:45:09 2003
+++ b/init/initramfs.c Sat Oct 25 11:45:09 2003
@@ -497,7 +497,7 @@
return;
}
printk("it isn't (%s); looks like an initrd\n", err);
- fd = sys_open("/dev/initrd", O_WRONLY|O_CREAT, 700);
+ fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
initrd_end - initrd_start);
diff -Nru a/init/main.c b/init/main.c
--- a/init/main.c Sat Oct 25 11:45:09 2003
+++ b/init/main.c Sat Oct 25 11:45:09 2003
@@ -94,7 +94,7 @@
* Are we up and running (ie do we have all the infrastructure
* set up)
*/
-int system_running = 0;
+int system_running;
/*
* Boot command-line arguments
diff -Nru a/ipc/msg.c b/ipc/msg.c
--- a/ipc/msg.c Sat Oct 25 11:45:09 2003
+++ b/ipc/msg.c Sat Oct 25 11:45:09 2003
@@ -837,11 +837,20 @@
msg_unlock(msq);
schedule();
- current->state = TASK_RUNNING;
+ /*
+ * The below optimisation is buggy. A sleeping thread that is
+ * woken up checks if it got a message and if so, copies it to
+ * userspace and just returns without taking any locks.
+ * But this return to user space can be faster than the message
+ * send, and if the receiver immediately exits the
+ * wake_up_process performed by the sender will oops.
+ */
+#if 0
msg = (struct msg_msg*) msr_d.r_msg;
if(!IS_ERR(msg))
goto out_success;
+#endif
msq = msg_lock(msqid);
msg = (struct msg_msg*)msr_d.r_msg;
diff -Nru a/kernel/kmod.c b/kernel/kmod.c
--- a/kernel/kmod.c Sat Oct 25 11:45:09 2003
+++ b/kernel/kmod.c Sat Oct 25 11:45:09 2003
@@ -36,7 +36,7 @@
#include
#include
-extern int max_threads, system_running;
+extern int max_threads;
#ifdef CONFIG_KMOD
diff -Nru a/kernel/resource.c b/kernel/resource.c
--- a/kernel/resource.c Sat Oct 25 11:45:09 2003
+++ b/kernel/resource.c Sat Oct 25 11:45:09 2003
@@ -206,6 +206,18 @@
EXPORT_SYMBOL(request_resource);
+struct resource *____request_resource(struct resource *root, struct resource *new)
+{
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+ conflict = __request_resource(root, new);
+ write_unlock(&resource_lock);
+ return conflict;
+}
+
+EXPORT_SYMBOL(____request_resource);
+
int release_resource(struct resource *old)
{
int retval;
diff -Nru a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c Sat Oct 25 11:45:09 2003
+++ b/kernel/sched.c Sat Oct 25 11:45:09 2003
@@ -2848,7 +2848,7 @@
#if defined(in_atomic)
static unsigned long prev_jiffy; /* ratelimiting */
- if (in_atomic() || irqs_disabled()) {
+ if ((in_atomic() || irqs_disabled()) && system_running) {
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
diff -Nru a/kernel/sys.c b/kernel/sys.c
--- a/kernel/sys.c Sat Oct 25 11:45:09 2003
+++ b/kernel/sys.c Sat Oct 25 11:45:09 2003
@@ -78,8 +78,6 @@
int C_A_D = 1;
int cad_pid = 1;
-extern int system_running;
-
/*
* Notifier list for kernel code which wants to be called
* at shutdown. This is used to stop any idling DMA operations
diff -Nru a/kernel/time.c b/kernel/time.c
--- a/kernel/time.c Sat Oct 25 11:45:09 2003
+++ b/kernel/time.c Sat Oct 25 11:45:09 2003
@@ -236,7 +236,7 @@
result = time_state; /* mostly `TIME_OK' */
/* Save for later - semantics of adjtime is to return old value */
- save_adjust = time_adjust;
+ save_adjust = time_next_adjust ? time_next_adjust : time_adjust;
#if 0 /* STA_CLOCKERR is never set yet */
time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */
@@ -283,7 +283,8 @@
if (txc->modes & ADJ_OFFSET) { /* values checked earlier */
if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
/* adjtime() is independent from ntp_adjtime() */
- time_adjust = txc->offset;
+ if ((time_next_adjust = txc->offset) == 0)
+ time_adjust = 0;
}
else if ( time_status & (STA_PLL | STA_PPSTIME) ) {
ltemp = (time_status & (STA_PPSTIME | STA_PPSSIGNAL)) ==
diff -Nru a/kernel/timer.c b/kernel/timer.c
--- a/kernel/timer.c Sat Oct 25 11:45:09 2003
+++ b/kernel/timer.c Sat Oct 25 11:45:09 2003
@@ -474,6 +474,7 @@
long time_adj; /* tick adjust (scaled 1 / HZ) */
long time_reftime; /* time at last adjustment (s) */
long time_adjust;
+long time_next_adjust;
/*
* this routine handles the overflow of the microsecond field
@@ -654,6 +655,12 @@
}
xtime.tv_nsec += delta_nsec;
time_interpolator_update(delta_nsec);
+
+ /* Changes by adjtime() do not take effect till next tick. */
+ if (time_next_adjust != 0) {
+ time_adjust = time_next_adjust;
+ time_next_adjust = 0;
+ }
}
/*
diff -Nru a/lib/string.c b/lib/string.c
--- a/lib/string.c Sat Oct 25 11:45:09 2003
+++ b/lib/string.c Sat Oct 25 11:45:09 2003
@@ -437,14 +437,12 @@
* You should not use this function to access IO space, use memcpy_toio()
* or memcpy_fromio() instead.
*/
-char * bcopy(const char * src, char * dest, int count)
+void bcopy(const char * src, char * dest, int count)
{
char *tmp = dest;
while (count--)
*tmp++ = *src++;
-
- return dest;
}
#endif
diff -Nru a/lib/vsprintf.c b/lib/vsprintf.c
--- a/lib/vsprintf.c Sat Oct 25 11:45:09 2003
+++ b/lib/vsprintf.c Sat Oct 25 11:45:09 2003
@@ -348,7 +348,7 @@
case 's':
s = va_arg(args, char *);
- if (!s)
+ if ((unsigned long)s < PAGE_SIZE)
s = "";
len = strnlen(s, precision);
diff -Nru a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c Sat Oct 25 11:45:09 2003
+++ b/mm/filemap.c Sat Oct 25 11:45:09 2003
@@ -61,6 +61,9 @@
* ->swap_device_lock (exclusive_swap_page, others)
* ->mapping->page_lock
*
+ * ->i_sem
+ * ->i_shared_sem (truncate->invalidate_mmap_range)
+ *
* ->mmap_sem
* ->i_shared_sem (various places)
*
diff -Nru a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c Sat Oct 25 11:45:09 2003
+++ b/mm/memory.c Sat Oct 25 11:45:09 2003
@@ -525,7 +525,7 @@
unsigned long end_addr, unsigned long *nr_accounted)
{
unsigned long zap_bytes = ZAP_BLOCK_SIZE;
- unsigned long tlb_start; /* For tlb_finish_mmu */
+ unsigned long tlb_start = 0; /* For tlb_finish_mmu */
int tlb_start_valid = 0;
int ret = 0;
diff -Nru a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c Sat Oct 25 11:45:09 2003
+++ b/mm/mmap.c Sat Oct 25 11:45:09 2003
@@ -280,6 +280,26 @@
}
/*
+ * Insert vm structure into process list sorted by address and into the inode's
+ * i_mmap ring. The caller should hold mm->page_table_lock and
+ * ->f_mapping->i_shared_sem if vm_file is non-NULL.
+ */
+static void
+__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+{
+ struct vm_area_struct * __vma, * prev;
+ struct rb_node ** rb_link, * rb_parent;
+
+ __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ BUG();
+ __vma_link(mm, vma, prev, rb_link, rb_parent);
+ mark_mm_hugetlb(mm, vma);
+ mm->map_count++;
+ validate_mm(mm);
+}
+
+/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
@@ -351,7 +371,9 @@
unsigned long end, unsigned long vm_flags,
struct file *file, unsigned long pgoff)
{
- spinlock_t * lock = &mm->page_table_lock;
+ spinlock_t *lock = &mm->page_table_lock;
+ struct inode *inode = file ? file->f_dentry->d_inode : NULL;
+ struct semaphore *i_shared_sem;
/*
* We later require that vma->vm_flags == vm_flags, so this tests
@@ -360,6 +382,8 @@
if (vm_flags & VM_SPECIAL)
return 0;
+ i_shared_sem = file ? &inode->i_mapping->i_shared_sem : NULL;
+
if (!prev) {
prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
goto merge_next;
@@ -372,12 +396,11 @@
is_mergeable_vma(prev, file, vm_flags) &&
can_vma_merge_after(prev, vm_flags, file, pgoff)) {
struct vm_area_struct *next;
- struct inode *inode = file ? file->f_dentry->d_inode : NULL;
int need_up = 0;
if (unlikely(file && prev->vm_next &&
prev->vm_next->vm_file == file)) {
- down(&inode->i_mapping->i_shared_sem);
+ down(i_shared_sem);
need_up = 1;
}
spin_lock(lock);
@@ -395,7 +418,7 @@
__remove_shared_vm_struct(next, inode);
spin_unlock(lock);
if (need_up)
- up(&inode->i_mapping->i_shared_sem);
+ up(i_shared_sem);
if (file)
fput(file);
@@ -405,7 +428,7 @@
}
spin_unlock(lock);
if (need_up)
- up(&inode->i_mapping->i_shared_sem);
+ up(i_shared_sem);
return 1;
}
@@ -419,10 +442,14 @@
pgoff, (end - addr) >> PAGE_SHIFT))
return 0;
if (end == prev->vm_start) {
+ if (file)
+ down(i_shared_sem);
spin_lock(lock);
prev->vm_start = addr;
prev->vm_pgoff -= (end - addr) >> PAGE_SHIFT;
spin_unlock(lock);
+ if (file)
+ up(i_shared_sem);
return 1;
}
}
@@ -1142,6 +1169,7 @@
unsigned long addr, int new_below)
{
struct vm_area_struct *new;
+ struct address_space *mapping = NULL;
if (mm->map_count >= MAX_MAP_COUNT)
return -ENOMEM;
@@ -1155,12 +1183,9 @@
INIT_LIST_HEAD(&new->shared);
- if (new_below) {
+ if (new_below)
new->vm_end = addr;
- vma->vm_start = addr;
- vma->vm_pgoff += ((addr - new->vm_start) >> PAGE_SHIFT);
- } else {
- vma->vm_end = addr;
+ else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -1171,7 +1196,25 @@
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- insert_vm_struct(mm, new);
+ if (vma->vm_file)
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ if (mapping)
+ down(&mapping->i_shared_sem);
+ spin_lock(&mm->page_table_lock);
+
+ if (new_below) {
+ vma->vm_start = addr;
+ vma->vm_pgoff += ((addr - new->vm_start) >> PAGE_SHIFT);
+ } else
+ vma->vm_end = addr;
+
+ __insert_vm_struct(mm, new);
+
+ spin_unlock(&mm->page_table_lock);
+ if (mapping)
+ up(&mapping->i_shared_sem);
+
return 0;
}
diff -Nru a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c Sat Oct 25 11:45:09 2003
+++ b/mm/shmem.c Sat Oct 25 11:45:09 2003
@@ -52,6 +52,10 @@
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
+/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
+#define SHMEM_PAGEIN VM_READ
+#define SHMEM_TRUNCATE VM_WRITE
+
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
@@ -390,6 +394,7 @@
return;
spin_lock(&info->lock);
+ info->flags |= SHMEM_TRUNCATE;
limit = info->next_index;
info->next_index = idx;
if (info->swapped && idx < SHMEM_NR_DIRECT) {
@@ -490,6 +495,19 @@
}
done2:
BUG_ON(info->swapped > info->next_index);
+ if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
+ /*
+ * Call truncate_inode_pages again: racing shmem_unuse_inode
+ * may have swizzled a page in from swap since vmtruncate or
+ * generic_delete_inode did it, before we lowered next_index.
+ * Also, though shmem_getpage checks i_size before adding to
+ * cache, no recheck after: so fix the narrow window there too.
+ */
+ spin_unlock(&info->lock);
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ spin_lock(&info->lock);
+ }
+ info->flags &= ~SHMEM_TRUNCATE;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
}
@@ -524,6 +542,19 @@
attr->ia_size>>PAGE_CACHE_SHIFT,
&page, SGP_READ);
}
+ /*
+ * Reset SHMEM_PAGEIN flag so that shmem_truncate can
+ * detect if any pages might have been added to cache
+ * after truncate_inode_pages. But we needn't bother
+ * if it's being fully truncated to zero-length: the
+ * nrpages check is efficient enough in that case.
+ */
+ if (attr->ia_size) {
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ spin_lock(&info->lock);
+ info->flags &= ~SHMEM_PAGEIN;
+ spin_unlock(&info->lock);
+ }
}
}
@@ -638,14 +669,10 @@
found:
idx += offset;
inode = &info->vfs_inode;
-
- /* Racing against delete or truncate? Must leave out of page cache */
- limit = (inode->i_state & I_FREEING)? 0:
- (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
- if (idx >= limit ||
- move_from_swap_cache(page, idx, inode->i_mapping) == 0)
+ if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
+ info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, ptr + offset, 0);
+ }
shmem_swp_unmap(ptr);
spin_unlock(&info->lock);
/*
@@ -653,7 +680,7 @@
* try_to_unuse will skip over mms, then reincrement count.
*/
swap_free(entry);
- return idx < limit;
+ return 1;
}
/*
@@ -706,7 +733,10 @@
spin_lock(&info->lock);
shmem_recalc_inode(inode);
- BUG_ON(index >= info->next_index);
+ if (index >= info->next_index) {
+ BUG_ON(!(info->flags & SHMEM_TRUNCATE));
+ goto unlock;
+ }
entry = shmem_swp_entry(info, index, NULL);
BUG_ON(!entry);
BUG_ON(entry->val);
@@ -720,6 +750,7 @@
}
shmem_swp_unmap(entry);
+unlock:
spin_unlock(&info->lock);
swap_free(swap);
redirty:
@@ -841,6 +872,7 @@
swap_free(swap);
} else if (!(error = move_from_swap_cache(
swappage, idx, mapping))) {
+ info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, entry, 0);
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
@@ -910,6 +942,7 @@
goto failed;
goto repeat;
}
+ info->flags |= SHMEM_PAGEIN;
}
info->alloced++;
@@ -1206,12 +1239,11 @@
pos += bytes;
buf += bytes;
if (pos > inode->i_size)
- inode->i_size = pos;
+ i_size_write(inode, pos);
flush_dcache_page(page);
set_page_dirty(page);
- if (!PageReferenced(page))
- SetPageReferenced(page);
+ mark_page_accessed(page);
page_cache_release(page);
if (left) {
@@ -1395,6 +1427,11 @@
int error = -ENOSPC;
if (inode) {
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ inode->i_mode |= S_ISGID;
+ }
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
@@ -1531,6 +1568,8 @@
set_page_dirty(page);
page_cache_release(page);
}
+ if (dir->i_mode & S_ISGID)
+ inode->i_gid = dir->i_gid;
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
diff -Nru a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c Sat Oct 25 11:45:09 2003
+++ b/mm/slab.c Sat Oct 25 11:45:09 2003
@@ -793,42 +793,42 @@
__initcall(cpucache_init);
-/* Interface to system's page allocator. No need to hold the cache-lock.
+/*
+ * Interface to system's page allocator. No need to hold the cache-lock.
+ *
+ * If we requested dmaable memory, we will get it. Even if we
+ * did not request dmaable memory, we might get it, but that
+ * would be relatively rare and ignorable.
*/
-static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
+static inline void *kmem_getpages(kmem_cache_t *cachep, unsigned long flags)
{
- void *addr;
+ void *addr;
- /*
- * If we requested dmaable memory, we will get it. Even if we
- * did not request dmaable memory, we might get it, but that
- * would be relatively rare and ignorable.
- */
flags |= cachep->gfpflags;
- if ( cachep->flags & SLAB_RECLAIM_ACCOUNT)
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
atomic_add(1<<cachep->gfporder, &slab_reclaim_pages);
- addr = (void*) __get_free_pages(flags, cachep->gfporder);
- /* Assume that now we have the pages no one else can legally
- * messes with the 'struct page's.
- * However vm_scan() might try to test the structure to see if
- * it is a named-page or buffer-page. The members it tests are
- * of no interest here.....
- */
+ addr = (void*)__get_free_pages(flags, cachep->gfporder);
+ if (addr) {
+ int i = (1 << cachep->gfporder);
+ struct page *page = virt_to_page(addr);
+
+ while (i--) {
+ SetPageSlab(page);
+ page++;
+ }
+ }
return addr;
}
-/* Interface to system's page release. */
-static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
+/*
+ * Interface to system's page release.
+ */
+static inline void kmem_freepages(kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
- /* free_pages() does not clear the type bit - we do that.
- * The pages have been unlinked from their cache-slab,
- * but their 'struct page's might be accessed in
- * vm_scan(). Shouldn't be a worry.
- */
while (i--) {
if (!TestClearPageSlab(page))
BUG();
@@ -1608,7 +1608,6 @@
do {
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
- SetPageSlab(page);
inc_page_state(nr_slab);
page++;
} while (--i);
diff -Nru a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
--- a/net/bluetooth/af_bluetooth.c Sat Oct 25 11:45:09 2003
+++ b/net/bluetooth/af_bluetooth.c Sat Oct 25 11:45:09 2003
@@ -56,9 +56,7 @@
#define BT_DBG( A... )
#endif
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_bt;
-#endif
/* Bluetooth sockets */
#define BT_MAX_PROTO 5
diff -Nru a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
--- a/net/bridge/br_netfilter.c Sat Oct 25 11:45:09 2003
+++ b/net/bridge/br_netfilter.c Sat Oct 25 11:45:09 2003
@@ -217,6 +217,7 @@
}
memcpy(skb->mac.ethernet->h_dest, dev->dev_addr,
ETH_ALEN);
+ skb->pkt_type = PACKET_HOST;
}
} else {
skb->dst = (struct dst_entry *)&__fake_rtable;
diff -Nru a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
--- a/net/bridge/netfilter/ebt_redirect.c Sat Oct 25 11:45:09 2003
+++ b/net/bridge/netfilter/ebt_redirect.c Sat Oct 25 11:45:09 2003
@@ -34,11 +34,10 @@
if (hooknr != NF_BR_BROUTING)
memcpy((**pskb).mac.ethernet->h_dest,
in->br_port->br->dev->dev_addr, ETH_ALEN);
- else {
+ else
memcpy((**pskb).mac.ethernet->h_dest,
in->dev_addr, ETH_ALEN);
- (*pskb)->pkt_type = PACKET_HOST;
- }
+ (*pskb)->pkt_type = PACKET_HOST;
return info->target;
}
diff -Nru a/net/compat.c b/net/compat.c
--- a/net/compat.c Sat Oct 25 11:45:09 2003
+++ b/net/compat.c Sat Oct 25 11:45:09 2003
@@ -317,12 +317,12 @@
char *optval, int optlen)
{
struct compat_ipt_replace *urepl = (struct compat_ipt_replace *)optval;
- struct ipt_replace *krepl;
- u32 origsize;
- unsigned int kreplsize;
- mm_segment_t old_fs;
+ struct ipt_replace *repl_nat;
+ char name[IPT_TABLE_MAXNAMELEN];
+ u32 origsize, tmp32, num_counters;
+ unsigned int repl_nat_size;
int ret;
- int i;
+ int i, num_ents;
compat_uptr_t ucntrs;
if (get_user(origsize, &urepl->size))
@@ -335,25 +335,53 @@
/* XXX Assumes that size of ipt_entry is the same both in
* native and compat environments.
*/
- kreplsize = sizeof(*krepl) + origsize;
- krepl = (struct ipt_replace *)kmalloc(kreplsize, GFP_KERNEL);
- if (krepl == NULL)
- return -ENOMEM;
+ repl_nat_size = sizeof(*repl_nat) + origsize;
+ repl_nat = compat_alloc_user_space(repl_nat_size);
ret = -EFAULT;
- krepl->size = origsize;
+ if (put_user(origsize, &repl_nat->size))
+ goto out;
+
if (!access_ok(VERIFY_READ, urepl, optlen) ||
- __copy_from_user(krepl->name, urepl->name, sizeof(urepl->name)) ||
- __get_user(krepl->valid_hooks, &urepl->valid_hooks) ||
- __get_user(krepl->num_entries, &urepl->num_entries) ||
- __get_user(krepl->num_counters, &urepl->num_counters) ||
- __get_user(ucntrs, &urepl->counters) ||
- __copy_from_user(krepl->entries, &urepl->entries, origsize))
- goto out_free;
+ !access_ok(VERIFY_WRITE, repl_nat, optlen))
+ goto out;
+
+ if (__copy_from_user(name, urepl->name, sizeof(urepl->name)) ||
+ __copy_to_user(repl_nat->name, name, sizeof(repl_nat->name)))
+ goto out;
+
+ if (__get_user(tmp32, &urepl->valid_hooks) ||
+ __put_user(tmp32, &repl_nat->valid_hooks))
+ goto out;
+
+ if (__get_user(tmp32, &urepl->num_entries) ||
+ __put_user(tmp32, &repl_nat->num_entries))
+ goto out;
+
+ if (__get_user(num_counters, &urepl->num_counters) ||
+ __put_user(num_counters, &repl_nat->num_counters))
+ goto out;
+
+ if (__get_user(ucntrs, &urepl->counters) ||
+ __put_user(compat_ptr(ucntrs), &repl_nat->counters))
+ goto out;
+
+ num_ents = origsize / sizeof(struct ipt_entry);
+
+ for (i = 0; i < num_ents; i++) {
+ struct ipt_entry ent;
+
+ if (__copy_from_user(&ent, &urepl->entries[i], sizeof(ent)) ||
+ __copy_to_user(&repl_nat->entries[i], &ent, sizeof(ent)))
+ goto out;
+ }
+
for (i = 0; i < NF_IP_NUMHOOKS; i++) {
- if (__get_user(krepl->hook_entry[i], &urepl->hook_entry[i]) ||
- __get_user(krepl->underflow[i], &urepl->underflow[i]))
- goto out_free;
+ if (__get_user(tmp32, &urepl->hook_entry[i]) ||
+ __put_user(tmp32, &repl_nat->hook_entry[i]) ||
+ __get_user(tmp32, &urepl->underflow[i]) ||
+ __put_user(tmp32, &repl_nat->underflow[i]))
+ goto out;
}
/*
@@ -362,18 +390,15 @@
* pointer into the standard syscall. We hope that the pointer is
* not misaligned ...
*/
- krepl->counters = compat_ptr(ucntrs);
- if (!access_ok(VERIFY_WRITE, krepl->counters,
- krepl->num_counters * sizeof(struct ipt_counters)))
- goto out_free;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_setsockopt(fd, level, optname, (char *)krepl, kreplsize);
- set_fs(old_fs);
+ if (!access_ok(VERIFY_WRITE, compat_ptr(ucntrs),
+ num_counters * sizeof(struct ipt_counters)))
+ goto out;
+
+
+ ret = sys_setsockopt(fd, level, optname,
+ (char *)repl_nat, repl_nat_size);
-out_free:
- kfree(krepl);
+out:
return ret;
}
diff -Nru a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c Sat Oct 25 11:45:09 2003
+++ b/net/core/dev.c Sat Oct 25 11:45:09 2003
@@ -2634,9 +2634,14 @@
dev->iflink = -1;
/* Init, if this function is available */
- ret = -EIO;
- if (dev->init && dev->init(dev))
- goto out_err;
+ if (dev->init) {
+ ret = dev->init(dev);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out_err;
+ }
+ }
dev->ifindex = dev_new_index();
if (dev->iflink == -1)
diff -Nru a/net/core/neighbour.c b/net/core/neighbour.c
--- a/net/core/neighbour.c Sat Oct 25 11:45:09 2003
+++ b/net/core/neighbour.c Sat Oct 25 11:45:09 2003
@@ -1628,6 +1628,9 @@
int p_id, int pdev_id, char *p_name)
{
struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+ const char *dev_name_source = NULL;
+ char *dev_name = NULL;
+ int err = 0;
if (!t)
return -ENOBUFS;
@@ -1644,8 +1647,10 @@
t->neigh_vars[9].data = &p->anycast_delay;
t->neigh_vars[10].data = &p->proxy_delay;
t->neigh_vars[11].data = &p->locktime;
+
+ dev_name_source = t->neigh_dev[0].procname;
if (dev) {
- t->neigh_dev[0].procname = dev->name;
+ dev_name_source = dev->name;
t->neigh_dev[0].ctl_name = dev->ifindex;
memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
} else {
@@ -1654,6 +1659,15 @@
t->neigh_vars[14].data = (int *)(p + 1) + 2;
t->neigh_vars[15].data = (int *)(p + 1) + 3;
}
+
+ dev_name = net_sysctl_strdup(dev_name_source);
+ if (!dev_name) {
+ err = -ENOBUFS;
+ goto free;
+ }
+
+ t->neigh_dev[0].procname = dev_name;
+
t->neigh_neigh_dir[0].ctl_name = pdev_id;
t->neigh_proto_dir[0].procname = p_name;
@@ -1666,11 +1680,19 @@
t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
if (!t->sysctl_header) {
- kfree(t);
- return -ENOBUFS;
+ err = -ENOBUFS;
+ goto free_procname;
}
p->sysctl_table = t;
return 0;
+
+ /* error path */
+ free_procname:
+ kfree(dev_name);
+ free:
+ kfree(t);
+
+ return err;
}
void neigh_sysctl_unregister(struct neigh_parms *p)
@@ -1679,6 +1701,7 @@
struct neigh_sysctl_table *t = p->sysctl_table;
p->sysctl_table = NULL;
unregister_sysctl_table(t->sysctl_header);
+ kfree(t->neigh_dev[0].procname);
kfree(t);
}
}
diff -Nru a/net/core/net-sysfs.c b/net/core/net-sysfs.c
--- a/net/core/net-sysfs.c Sat Oct 25 11:45:09 2003
+++ b/net/core/net-sysfs.c Sat Oct 25 11:45:09 2003
@@ -408,6 +408,7 @@
class_dev->class = &net_class;
class_dev->class_data = net;
+ net->last_stats = net->get_stats;
strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE);
if ((ret = class_device_register(class_dev)))
@@ -419,7 +420,6 @@
}
- net->last_stats = net->get_stats;
if (net->get_stats &&
(ret = sysfs_create_group(&class_dev->kobj, &netstat_group)))
goto out_unreg;
diff -Nru a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
--- a/net/core/sysctl_net_core.c Sat Oct 25 11:45:09 2003
+++ b/net/core/sysctl_net_core.c Sat Oct 25 11:45:09 2003
@@ -8,6 +8,7 @@
#include
#include
#include
+#include
#ifdef CONFIG_SYSCTL
@@ -33,6 +34,19 @@
extern char sysctl_divert_version[];
#endif /* CONFIG_NET_DIVERT */
+/*
+ * This strdup() is used for creating copies of network
+ * device names to be handed over to sysctl.
+ */
+
+char *net_sysctl_strdup(const char *s)
+{
+ char *rv = kmalloc(strlen(s)+1, GFP_KERNEL);
+ if (rv)
+ strcpy(rv, s);
+ return rv;
+}
+
ctl_table core_table[] = {
#ifdef CONFIG_NET
{
@@ -162,4 +176,7 @@
#endif /* CONFIG_NET */
{ .ctl_name = 0 }
};
+
+EXPORT_SYMBOL(net_sysctl_strdup);
+
#endif
diff -Nru a/net/ipv4/devinet.c b/net/ipv4/devinet.c
--- a/net/ipv4/devinet.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv4/devinet.c Sat Oct 25 11:45:09 2003
@@ -905,6 +905,14 @@
* not interesting to applications using netlink.
*/
inetdev_changename(dev, in_dev);
+
+#ifdef CONFIG_SYSCTL
+ devinet_sysctl_unregister(&in_dev->cnf);
+ neigh_sysctl_unregister(in_dev->arp_parms);
+ neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4,
+ NET_IPV4_NEIGH, "ipv4");
+ devinet_sysctl_register(in_dev, &in_dev->cnf);
+#endif
break;
}
out:
@@ -1302,6 +1310,7 @@
int i;
struct net_device *dev = in_dev ? in_dev->dev : NULL;
struct devinet_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+ char *dev_name = NULL;
if (!t)
return;
@@ -1310,13 +1319,25 @@
t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
t->devinet_vars[i].de = NULL;
}
+
if (dev) {
- t->devinet_dev[0].procname = dev->name;
+ dev_name = dev->name;
t->devinet_dev[0].ctl_name = dev->ifindex;
} else {
- t->devinet_dev[0].procname = "default";
+ dev_name = "default";
t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
}
+
+ /*
+ * Make a copy of dev_name, because '.procname' is regarded as const
+ * by sysctl and we wouldn't want anyone to change it under our feet
+ * (see SIOCSIFNAME).
+ */
+ dev_name = net_sysctl_strdup(dev_name);
+ if (!dev_name)
+ goto free;
+
+ t->devinet_dev[0].procname = dev_name;
t->devinet_dev[0].child = t->devinet_vars;
t->devinet_dev[0].de = NULL;
t->devinet_conf_dir[0].child = t->devinet_dev;
@@ -1328,9 +1349,17 @@
t->sysctl_header = register_sysctl_table(t->devinet_root_dir, 0);
if (!t->sysctl_header)
- kfree(t);
- else
- p->sysctl = t;
+ goto free_procname;
+
+ p->sysctl = t;
+ return;
+
+ /* error path */
+ free_procname:
+ kfree(dev_name);
+ free:
+ kfree(t);
+ return;
}
static void devinet_sysctl_unregister(struct ipv4_devconf *p)
@@ -1339,6 +1368,7 @@
struct devinet_sysctl_table *t = p->sysctl;
p->sysctl = NULL;
unregister_sysctl_table(t->sysctl_header);
+ kfree(t->devinet_dev[0].procname);
kfree(t);
}
}
diff -Nru a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
--- a/net/ipv4/ip_gre.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv4/ip_gre.c Sat Oct 25 11:45:09 2003
@@ -601,8 +601,7 @@
read_lock(&ipgre_lock);
if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
- secpath_put(skb->sp);
- skb->sp = NULL;
+ secpath_reset(skb);
skb->mac.raw = skb->nh.raw;
skb->nh.raw = __pskb_pull(skb, offset);
diff -Nru a/net/ipv4/ipip.c b/net/ipv4/ipip.c
--- a/net/ipv4/ipip.c Sat Oct 25 11:45:10 2003
+++ b/net/ipv4/ipip.c Sat Oct 25 11:45:10 2003
@@ -483,8 +483,7 @@
return 0;
}
- secpath_put(skb->sp);
- skb->sp = NULL;
+ secpath_reset(skb);
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
diff -Nru a/net/ipv4/tcp.c b/net/ipv4/tcp.c
--- a/net/ipv4/tcp.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv4/tcp.c Sat Oct 25 11:45:09 2003
@@ -1540,17 +1540,6 @@
if (copied && tp->urg_data && tp->urg_seq == *seq)
break;
- /* We need to check signals first, to get correct SIGURG
- * handling. FIXME: Need to check this doesn't impact 1003.1g
- * and move it down to the bottom of the loop
- */
- if (signal_pending(current)) {
- if (copied)
- break;
- copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
- break;
- }
-
/* Next get a buffer. */
skb = skb_peek(&sk->sk_receive_queue);
@@ -1587,6 +1576,7 @@
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
!timeo ||
+ signal_pending(current) ||
(flags & MSG_PEEK))
break;
} else {
@@ -1614,6 +1604,11 @@
if (!timeo) {
copied = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ copied = sock_intr_errno(timeo);
break;
}
}
diff -Nru a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
--- a/net/ipv6/addrconf.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv6/addrconf.c Sat Oct 25 11:45:09 2003
@@ -1880,6 +1880,14 @@
break;
case NETDEV_CHANGE:
break;
+ case NETDEV_CHANGENAME:
+#ifdef CONFIG_SYSCTL
+ addrconf_sysctl_unregister(&idev->cnf);
+ neigh_sysctl_unregister(idev->nd_parms);
+ neigh_sysctl_register(dev, idev->nd_parms, NET_IPV6, NET_IPV6_NEIGH, "ipv6");
+ addrconf_sysctl_register(idev, &idev->cnf);
+#endif
+ break;
};
return NOTIFY_OK;
@@ -3037,6 +3045,7 @@
int i;
struct net_device *dev = idev ? idev->dev : NULL;
struct addrconf_sysctl_table *t;
+ char *dev_name = NULL;
t = kmalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL)
@@ -3048,12 +3057,24 @@
t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
}
if (dev) {
- t->addrconf_dev[0].procname = dev->name;
+ dev_name = dev->name;
t->addrconf_dev[0].ctl_name = dev->ifindex;
} else {
- t->addrconf_dev[0].procname = "default";
+ dev_name = "default";
t->addrconf_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
}
+
+ /*
+ * Make a copy of dev_name, because '.procname' is regarded as const
+ * by sysctl and we wouldn't want anyone to change it under our feet
+ * (see SIOCSIFNAME).
+ */
+ dev_name = net_sysctl_strdup(dev_name);
+ if (!dev_name)
+ goto free;
+
+ t->addrconf_dev[0].procname = dev_name;
+
t->addrconf_dev[0].child = t->addrconf_vars;
t->addrconf_dev[0].de = NULL;
t->addrconf_conf_dir[0].child = t->addrconf_dev;
@@ -3065,9 +3086,18 @@
t->sysctl_header = register_sysctl_table(t->addrconf_root_dir, 0);
if (t->sysctl_header == NULL)
- kfree(t);
+ goto free_procname;
else
p->sysctl = t;
+ return;
+
+ /* error path */
+ free_procname:
+ kfree(dev_name);
+ free:
+ kfree(t);
+
+ return;
}
static void addrconf_sysctl_unregister(struct ipv6_devconf *p)
@@ -3076,6 +3106,7 @@
struct addrconf_sysctl_table *t = p->sysctl;
p->sysctl = NULL;
unregister_sysctl_table(t->sysctl_header);
+ kfree(t->addrconf_dev[0].procname);
kfree(t);
}
}
diff -Nru a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
--- a/net/ipv6/ip6_tunnel.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv6/ip6_tunnel.c Sat Oct 25 11:45:09 2003
@@ -515,8 +515,7 @@
read_unlock(&ip6ip6_lock);
goto discard;
}
- secpath_put(skb->sp);
- skb->sp = NULL;
+ secpath_reset(skb);
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
skb->protocol = htons(ETH_P_IPV6);
diff -Nru a/net/ipv6/sit.c b/net/ipv6/sit.c
--- a/net/ipv6/sit.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv6/sit.c Sat Oct 25 11:45:09 2003
@@ -377,8 +377,7 @@
read_lock(&ipip6_lock);
if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
- secpath_put(skb->sp);
- skb->sp = NULL;
+ secpath_reset(skb);
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
diff -Nru a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
--- a/net/ipv6/xfrm6_policy.c Sat Oct 25 11:45:09 2003
+++ b/net/ipv6/xfrm6_policy.c Sat Oct 25 11:45:09 2003
@@ -219,6 +219,7 @@
fl->fl_ip_sport = ports[0];
fl->fl_ip_dport = ports[1];
}
+ fl->proto = nexthdr;
return;
/* XXX Why are there these headers? */
@@ -227,6 +228,7 @@
case IPPROTO_COMP:
default:
fl->fl_ipsec_spi = 0;
+ fl->proto = nexthdr;
return;
};
}
diff -Nru a/net/llc/af_llc.c b/net/llc/af_llc.c
--- a/net/llc/af_llc.c Sat Oct 25 11:45:09 2003
+++ b/net/llc/af_llc.c Sat Oct 25 11:45:09 2003
@@ -30,7 +30,6 @@
#include
#include
#include
-#include
/* remember: uninitialized global data is zeroed because its in .bss */
static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
diff -Nru a/net/llc/llc_proc.c b/net/llc/llc_proc.c
--- a/net/llc/llc_proc.c Sat Oct 25 11:45:09 2003
+++ b/net/llc/llc_proc.c Sat Oct 25 11:45:09 2003
@@ -14,7 +14,6 @@
#include
#include
-#ifdef CONFIG_PROC_FS
#include
#include
#include
@@ -273,13 +272,3 @@
remove_proc_entry("core", llc_proc_dir);
remove_proc_entry("llc", proc_net);
}
-#else /* CONFIG_PROC_FS */
-int __init llc_proc_init(void)
-{
- return 0;
-}
-
-void llc_proc_exit(void)
-{
-}
-#endif /* CONFIG_PROC_FS */
diff -Nru a/sound/core/pcm_native.c b/sound/core/pcm_native.c
--- a/sound/core/pcm_native.c Sat Oct 25 11:45:09 2003
+++ b/sound/core/pcm_native.c Sat Oct 25 11:45:09 2003
@@ -1982,9 +1982,9 @@
}
}
remove_wait_queue(&pcm->open_wait, &wait);
+ up(&pcm->open_mutex);
if (err < 0)
goto __error;
- up(&pcm->open_mutex);
return err;
__error: