diff -urN linux-2.4.0-test1/Makefile linux-2.4.0-test1-lia/Makefile --- linux-2.4.0-test1/Makefile Wed May 24 08:29:47 2000 +++ linux-2.4.0-test1-lia/Makefile Fri May 26 20:39:17 2000 @@ -82,7 +82,7 @@ CPPFLAGS := -D__KERNEL__ -I$(HPATH) -CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer +CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -g -O2 -fomit-frame-pointer AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS) # use '-fno-strict-aliasing', but only if the compiler can take it diff -urN linux-2.4.0-test1/arch/ia64/Makefile linux-2.4.0-test1-lia/arch/ia64/Makefile --- linux-2.4.0-test1/arch/ia64/Makefile Fri Apr 21 15:21:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/Makefile Thu May 25 22:52:30 2000 @@ -12,15 +12,11 @@ AWK := awk LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds -# next line is for HP compiler backend: -#AFLAGS += -DGCC_RETVAL_POINTER_IN_R8 -# The next line is needed when compiling with the July snapshot of the Cygnus compiler: -#EXTRA = -D__GCC_DOESNT_KNOW_IN_REGS__ -# next two lines are for the September snapshot of the Cygnus compiler: -AFLAGS += -D__GCC_MULTIREG_RETVALS__ -Wa,-x -EXTRA = -D__GCC_MULTIREG_RETVALS__ +AFLAGS += -Wa,-x +EXTRA = -CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 +CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \ + -mconstant-gp -funwind-tables ifdef CONFIG_IA64_GENERIC CORE_FILES := arch/$(ARCH)/hp/hp.a \ diff -urN linux-2.4.0-test1/arch/ia64/boot/Makefile linux-2.4.0-test1-lia/arch/ia64/boot/Makefile --- linux-2.4.0-test1/arch/ia64/boot/Makefile Thu Mar 30 16:56:04 2000 +++ linux-2.4.0-test1-lia/arch/ia64/boot/Makefile Thu May 25 22:53:10 2000 @@ -25,7 +25,8 @@ all: $(TARGETS) bootloader: $(OBJECTS) - $(LD) $(LINKFLAGS) $(OBJECTS) $(LIBS) -o bootloader + $(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \ + -o bootloader clean: rm -f $(TARGETS) diff -urN linux-2.4.0-test1/arch/ia64/config.in 
linux-2.4.0-test1-lia/arch/ia64/config.in --- linux-2.4.0-test1/arch/ia64/config.in Fri Apr 21 15:21:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/config.in Thu Jun 1 00:59:33 2000 @@ -4,7 +4,6 @@ comment 'General setup' define_bool CONFIG_IA64 y -define_bool CONFIG_ITANIUM y # easy choice for now... ;-) define_bool CONFIG_ISA n define_bool CONFIG_SBUS n @@ -22,10 +21,13 @@ 64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB if [ "$CONFIG_IA64_DIG" = "y" ]; then + define_bool CONFIG_ITANIUM y + define_bool CONFIG_IA64_BRL_EMU y bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC + bool ' Enable Itanium A1-step specific code' CONFIG_ITANIUM_A1_SPECIFIC + bool ' Enable use of global TLB purge instruction (ptc.g)' CONFIG_ITANIUM_PTCG bool ' Enable SoftSDV hacks' CONFIG_IA64_SOFTSDV_HACKS - bool ' Enable BigSur hacks' CONFIG_IA64_BIGSUR_HACKS - bool ' Enable Lion hacks' CONFIG_IA64_LION_HACKS + bool ' Enable AzusA hacks' CONFIG_IA64_AZUSA_HACKS bool ' Emulate PAL/SAL/EFI firmware' CONFIG_IA64_FW_EMU bool ' Enable IA64 Machine Check Abort' CONFIG_IA64_MCA fi @@ -43,6 +45,7 @@ bool 'SMP support' CONFIG_SMP bool 'Performance monitor support' CONFIG_PERFMON +bool '/proc/palinfo support' CONFIG_IA64_PALINFO bool 'Networking support' CONFIG_NET bool 'System V IPC' CONFIG_SYSVIPC @@ -187,5 +190,6 @@ bool 'Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG bool 'Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ bool 'Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS +bool 'Enable new unwind support' CONFIG_IA64_NEW_UNWIND endmenu diff -urN linux-2.4.0-test1/arch/ia64/defconfig linux-2.4.0-test1-lia/arch/ia64/defconfig --- linux-2.4.0-test1/arch/ia64/defconfig Mon May 8 22:00:41 2000 +++ linux-2.4.0-test1-lia/arch/ia64/defconfig Fri May 26 20:37:41 2000 @@ -115,8 +115,8 @@ CONFIG_IDEDMA_PCI_EXPERIMENTAL=y # CONFIG_IDEDMA_PCI_WIP is not set # CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set -# CONFIG_BLK_DEV_AEC62XX is not set -# 
CONFIG_AEC62XX_TUNING is not set +# CONFIG_BLK_DEV_AEC6210 is not set +# CONFIG_AEC6210_TUNING is not set # CONFIG_BLK_DEV_ALI15X3 is not set # CONFIG_WDC_ALI15X3 is not set # CONFIG_BLK_DEV_AMD7409 is not set diff -urN linux-2.4.0-test1/arch/ia64/dig/iosapic.c linux-2.4.0-test1-lia/arch/ia64/dig/iosapic.c --- linux-2.4.0-test1/arch/ia64/dig/iosapic.c Fri Apr 21 15:21:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/dig/iosapic.c Thu May 25 22:54:40 2000 @@ -67,6 +67,12 @@ (delivery << IO_SAPIC_DELIVERY_SHIFT) | vector); +#ifdef CONFIG_IA64_AZUSA_HACKS + /* set Flush Disable bit */ + if (iosapic_addr != 0xc0000000fec00000) + low32 |= (1 << 17); +#endif + /* dest contains both id and eid */ high32 = (dest << IO_SAPIC_DEST_SHIFT); @@ -216,30 +222,33 @@ } void -iosapic_init (unsigned long address) +iosapic_init (unsigned long address, int irqbase) { struct hw_interrupt_type *irq_type; struct pci_vector_struct *vectors; int i, irq; - /* - * Map the legacy ISA devices into the IOSAPIC data. Some of - * these may get reprogrammed later on with data from the ACPI - * Interrupt Source Override table. - */ - for (i = 0; i < 16; i++) { - irq = isa_irq_to_vector(i); - iosapic_pin(irq) = i; - iosapic_bus(irq) = BUS_ISA; - iosapic_busdata(irq) = 0; - iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY; - iosapic_trigger(irq) = IO_SAPIC_EDGE; - iosapic_polarity(irq) = IO_SAPIC_POL_HIGH; + if (irqbase == 0) + /* + * Map the legacy ISA devices into the IOSAPIC data. + * Some of these may get reprogrammed later on with + * data from the ACPI Interrupt Source Override table. 
+ */ + for (i = 0; i < 16; i++) { + irq = isa_irq_to_vector(i); + iosapic_pin(irq) = i; + iosapic_bus(irq) = BUS_ISA; + iosapic_busdata(irq) = 0; + iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY; + iosapic_trigger(irq) = IO_SAPIC_EDGE; + iosapic_polarity(irq) = IO_SAPIC_POL_HIGH; #ifdef DEBUG_IRQ_ROUTING - printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n", i, irq, iosapic_pin(irq)); + printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n", + i, irq, iosapic_pin(irq)); #endif - } + } +#ifndef CONFIG_IA64_SOFTSDV_HACKS /* * Map the PCI Interrupt data into the ACPI IOSAPIC data using * the info that the bootstrap loader passed to us. @@ -250,6 +259,8 @@ irq = vectors[i].irq; if (irq < 16) irq = isa_irq_to_vector(irq); + if (iosapic_baseirq(irq) != irqbase) + continue; iosapic_bustype(irq) = BUS_PCI; iosapic_pin(irq) = irq - iosapic_baseirq(irq); @@ -270,8 +281,12 @@ irq, iosapic_pin(irq)); #endif } +#endif /* CONFIG_IA64_SOFTSDV_HACKS */ for (i = 0; i < NR_IRQS; ++i) { + if (iosapic_baseirq(i) != irqbase) + continue; + if (iosapic_pin(i) != -1) { if (iosapic_trigger(i) == IO_SAPIC_LEVEL) irq_type = &irq_type_iosapic_level; diff -urN linux-2.4.0-test1/arch/ia64/dig/setup.c linux-2.4.0-test1-lia/arch/ia64/dig/setup.c --- linux-2.4.0-test1/arch/ia64/dig/setup.c Fri Apr 21 15:21:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/dig/setup.c Tue May 2 12:46:36 2000 @@ -53,6 +53,10 @@ */ ROOT_DEV = to_kdev_t(0x0802); /* default to second partition on first drive */ +#ifdef CONFIG_IA64_SOFTSDV_HACKS + ROOT_DEV = to_kdev_t(0x0302); /* 2nd partion on 1st IDE */ +#endif /* CONFIG_IA64_SOFTSDV_HACKS */ + #ifdef CONFIG_SMP init_smp_config(); #endif diff -urN linux-2.4.0-test1/arch/ia64/hp/hpsim_irq.c linux-2.4.0-test1-lia/arch/ia64/hp/hpsim_irq.c --- linux-2.4.0-test1/arch/ia64/hp/hpsim_irq.c Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/hp/hpsim_irq.c Thu Jun 1 01:00:14 2000 @@ -5,7 +5,8 @@ * Copyright (C) 1998-2000 David Mosberger-Tang */ -#include +#include 
+#include #include static unsigned int diff -urN linux-2.4.0-test1/arch/ia64/ia32/ia32_entry.S linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_entry.S --- linux-2.4.0-test1/arch/ia64/ia32/ia32_entry.S Fri Apr 21 15:21:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_entry.S Thu May 25 22:55:27 2000 @@ -1,14 +1,15 @@ +#include #include #include +#include "../kernel/entry.h" + // // Get possibly unaligned sigmask argument into an aligned // kernel buffer .text - .proc ia32_rt_sigsuspend - .global ia32_rt_sigsuspend -ia32_rt_sigsuspend: +GLOBAL_ENTRY(ia32_rt_sigsuspend) // We'll cheat and not do an alloc here since we are ultimately // going to do a simple branch to the IA64 sys_rt_sigsuspend. // r32 is still the first argument which is the signal mask. @@ -32,24 +33,22 @@ st4 [r32]=r2 st4 [r10]=r3 br.cond.sptk.many sys_rt_sigsuspend +END(ia32_rt_sigsuspend) .section __ex_table,"a" data4 @gprel(1b) data4 (2b-1b)|1 .previous +GLOBAL_ENTRY(ia32_ret_from_syscall) + PT_REGS_UNWIND_INFO - .endp ia32_rt_sigsuspend - - .global ia32_ret_from_syscall - .proc ia32_ret_from_syscall -ia32_ret_from_syscall: cmp.ge p6,p7=r8,r0 // syscall executed successfully? adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 ;; st8 [r2]=r8 // store return value in slot for r8 br.cond.sptk.few ia64_leave_kernel - .endp ia32_ret_from_syscall +END(ia32_ret_from_syscall) // // Invoke a system call, but do some tracing before and after the call. @@ -61,9 +60,8 @@ // r15 = syscall number // b6 = syscall entry point // - .global ia32_trace_syscall - .proc ia32_trace_syscall -ia32_trace_syscall: +GLOBAL_ENTRY(ia32_trace_syscall) + PT_REGS_UNWIND_INFO br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args .Lret4: br.call.sptk.few rp=b6 // do the syscall .Lret5: cmp.lt p6,p0=r8,r0 // syscall failed? 
@@ -72,42 +70,38 @@ st8.spill [r2]=r8 // store return value in slot for r8 br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value .Lret6: br.cond.sptk.many ia64_leave_kernel // rp MUST be != ia64_leave_kernel! +END(ia32_trace_syscall) - .endp ia32_trace_syscall - - .align 16 - .global sys32_vfork - .proc sys32_vfork -sys32_vfork: +GLOBAL_ENTRY(sys32_vfork) alloc r16=ar.pfs,2,2,3,0;; mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags br.cond.sptk.few .fork1 // do the work - .endp sys32_vfork +END(sys32_vfork) - .align 16 - .global sys32_fork - .proc sys32_fork -sys32_fork: - alloc r16=ar.pfs,2,2,3,0;; +GLOBAL_ENTRY(sys32_fork) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)) + alloc r16=ar.pfs,2,2,3,0 mov out0=SIGCHLD // out0 = clone_flags + ;; .fork1: - movl r28=1f - mov loc1=rp - br.cond.sptk.many save_switch_stack -1: - mov loc0=r16 // save ar.pfs across do_fork + mov loc0=rp + mov loc1=r16 // save ar.pfs across do_fork + DO_SAVE_SWITCH_STACK + + UNW(.body) + adds out2=IA64_SWITCH_STACK_SIZE+16,sp adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp ;; ld8 out1=[r2] // fetch usp from pt_regs.r12 br.call.sptk.few rp=do_fork .ret1: - mov ar.pfs=loc0 + mov ar.pfs=loc1 + UNW(.restore sp) adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack - mov rp=loc1 - ;; + mov rp=loc0 br.ret.sptk.many rp - .endp sys32_fork +END(sys32_fork) .rodata .align 8 @@ -304,3 +298,8 @@ data8 sys_ni_syscall /* streams1 */ data8 sys_ni_syscall /* streams2 */ data8 sys32_vfork /* 190 */ + /* + * CAUTION: If any system calls are added beyond this point + * then the check in `arch/ia64/kernel/ivt.S' will have + * to be modified also. You've been warned. 
+ */ diff -urN linux-2.4.0-test1/arch/ia64/ia32/ia32_signal.c linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_signal.c --- linux-2.4.0-test1/arch/ia64/ia32/ia32_signal.c Wed May 24 18:38:26 2000 +++ linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_signal.c Thu Jun 1 01:00:27 2000 @@ -55,7 +55,7 @@ }; static int -copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from) +copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from) { int err; @@ -271,7 +271,7 @@ /* Set up to return from userspace. If provided, use a stub already in userspace. */ - err |= __put_user(frame->retcode, &frame->pretcode); + err |= __put_user((long)frame->retcode, &frame->pretcode); /* This is popl %eax ; movl $,%eax ; int $0x80 */ err |= __put_user(0xb858, (short *)(frame->retcode+0)); #define __IA32_NR_sigreturn 119 @@ -326,8 +326,8 @@ ? current->exec_domain->signal_invmap[sig] : sig), &frame->sig); - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); + err |= __put_user((long)&frame->info, &frame->pinfo); + err |= __put_user((long)&frame->uc, &frame->puc); err |= copy_siginfo_to_user32(&frame->info, info); /* Create the ucontext. 
*/ @@ -341,7 +341,7 @@ regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - err |= __put_user(frame->retcode, &frame->pretcode); + err |= __put_user((long)frame->retcode, &frame->pretcode); /* This is movl $,%eax ; int $0x80 */ err |= __put_user(0xb8, (char *)(frame->retcode+0)); #define __IA32_NR_rt_sigreturn 173 diff -urN linux-2.4.0-test1/arch/ia64/ia32/ia32_support.c linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_support.c --- linux-2.4.0-test1/arch/ia64/ia32/ia32_support.c Tue Feb 8 12:01:59 2000 +++ linux-2.4.0-test1-lia/arch/ia64/ia32/ia32_support.c Mon May 22 18:03:20 2000 @@ -16,6 +16,43 @@ extern void die_if_kernel (char *str, struct pt_regs *regs, long err); +void +ia32_save_state (struct thread_struct *thread) +{ + unsigned long eflag, fsr, fcr, fir, fdr; + + asm ("mov %0=ar.eflag;" + "mov %1=ar.fsr;" + "mov %2=ar.fcr;" + "mov %3=ar.fir;" + "mov %4=ar.fdr" + : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr)); + thread->eflag = eflag; + thread->fsr = fsr; + thread->fcr = fcr; + thread->fir = fir; + thread->fdr = fdr; +} + +void +ia32_load_state (struct thread_struct *thread) +{ + unsigned long eflag, fsr, fcr, fir, fdr; + + eflag = thread->eflag; + fsr = thread->fsr; + fcr = thread->fcr; + fir = thread->fir; + fdr = thread->fdr; + + asm volatile ("mov ar.eflag=%0;" + "mov ar.fsr=%1;" + "mov ar.fcr=%2;" + "mov ar.fir=%3;" + "mov ar.fdr=%4" + :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr)); +} + /* * Setup IA32 GDT and TSS */ diff -urN linux-2.4.0-test1/arch/ia64/ia32/sys_ia32.c linux-2.4.0-test1-lia/arch/ia64/ia32/sys_ia32.c --- linux-2.4.0-test1/arch/ia64/ia32/sys_ia32.c Sat May 20 10:47:29 2000 +++ linux-2.4.0-test1-lia/arch/ia64/ia32/sys_ia32.c Fri May 26 20:38:41 2000 @@ -7,6 +7,8 @@ * Copyright (C) 1999 Arun Sharma * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 2000 Hewlett-Packard Co. 
+ * Copyright (C) 2000 David Mosberger-Tang * * These routines maintain argument size conversion between 32bit and 64bit * environment. @@ -55,24 +57,29 @@ #include #include -#define A(__x) ((unsigned long)(__x)) -#define AA(__x) ((unsigned long)(__x)) +#define A(__x) ((unsigned long)(__x)) +#define AA(__x) ((unsigned long)(__x)) +#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) +#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de))) + +extern asmlinkage long sys_execve (char *, char **, char **, struct pt_regs *); +extern asmlinkage long sys_munmap (unsigned long, size_t len); +extern asmlinkage long sys_mprotect (unsigned long, size_t, unsigned long); static int nargs(unsigned int arg, char **ap) { - char *ptr; - int n, err; + int n, err, addr; n = 0; do { - if (err = get_user(ptr, (int *)arg)) + if ((err = get_user(addr, (int *)A(arg))) != 0) return(err); if (ap) - *ap++ = ptr; + *ap++ = (char *)A(addr); arg += sizeof(unsigned int); n++; - } while (ptr); + } while (addr); return(n - 1); } @@ -106,14 +113,14 @@ down(¤t->mm->mmap_sem); lock_kernel(); - av = do_mmap_pgoff(0, NULL, len, - PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0); + av = (char **) do_mmap_pgoff(0, 0UL, len, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, 0); unlock_kernel(); up(¤t->mm->mmap_sem); if (IS_ERR(av)) - return(av); + return (long)av; ae = av + na + 1; av[na] = (char *)0; ae[ne] = (char *)0; @@ -121,7 +128,7 @@ (void)nargs(envp, ae); r = sys_execve(filename, av, ae, regs); if (IS_ERR(r)) - sys_munmap(av, len); + sys_munmap((unsigned long) av, len); return(r); } @@ -146,9 +153,9 @@ return err; } -extern asmlinkage int sys_newstat(char * filename, struct stat * statbuf); +extern asmlinkage long sys_newstat(char * filename, struct stat * statbuf); -asmlinkage int +asmlinkage long sys32_newstat(char * filename, struct stat32 *statbuf) { int ret; @@ -163,9 +170,9 @@ return ret; } -extern asmlinkage int sys_newlstat(char * 
filename, struct stat * statbuf); +extern asmlinkage long sys_newlstat(char * filename, struct stat * statbuf); -asmlinkage int +asmlinkage long sys32_newlstat(char * filename, struct stat32 *statbuf) { int ret; @@ -180,9 +187,9 @@ return ret; } -extern asmlinkage int sys_newfstat(unsigned int fd, struct stat * statbuf); +extern asmlinkage long sys_newfstat(unsigned int fd, struct stat * statbuf); -asmlinkage int +asmlinkage long sys32_newfstat(unsigned int fd, struct stat32 *statbuf) { int ret; @@ -214,31 +221,23 @@ return -EINVAL; if (prot & PROT_WRITE) prot |= PROT_EXEC; -#ifdef DDD -#else // DDD prot |= PROT_WRITE; -#endif // DDD front = NULL; back = NULL; if ((baddr = (addr & PAGE_MASK)) != addr && get_user(c, (char *)baddr) == 0) { front = kmalloc(addr - baddr, GFP_KERNEL); memcpy(front, (void *)baddr, addr - baddr); } -#ifndef DDD - if (addr) -#endif - if (((addr + len) & ~PAGE_MASK) && get_user(c, (char *)(addr + len)) == 0) { + if (addr && ((addr + len) & ~PAGE_MASK) && get_user(c, (char *)(addr + len)) == 0) { back = kmalloc(PAGE_SIZE - ((addr + len) & ~PAGE_MASK), GFP_KERNEL); - memcpy(back, addr + len, PAGE_SIZE - ((addr + len) & ~PAGE_MASK)); + memcpy(back, (char *)addr + len, PAGE_SIZE - ((addr + len) & ~PAGE_MASK)); } if ((r = do_mmap(0, baddr, len + (addr - baddr), prot, flags | MAP_ANONYMOUS, 0)) < 0) return(r); -#ifndef DDD if (addr == 0) addr = r; -#endif // DDD if (back) { - memcpy(addr + len, back, PAGE_SIZE - ((addr + len) & ~PAGE_MASK)); + memcpy((char *)addr + len, back, PAGE_SIZE - ((addr + len) & ~PAGE_MASK)); kfree(back); } if (front) { @@ -246,7 +245,7 @@ kfree(front); } if (flags & MAP_ANONYMOUS) { - memset(addr, 0, len); + memset((char *)addr, 0, len); return(addr); } if (!file) @@ -280,7 +279,7 @@ unsigned int offset; }; -asmlinkage int +asmlinkage long sys32_mmap(struct mmap_arg_struct *arg) { int error = -EFAULT; @@ -300,11 +299,7 @@ } a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); -#ifdef DDD if ((a.flags & MAP_FIXED) && ((a.addr & 
~PAGE_MASK) || (a.offset & ~PAGE_MASK))) { -#else // DDD - if (1) { -#endif // DDD unlock_kernel(); up(¤t->mm->mmap_sem); error = do_mmap_fake(file, a.addr, a.len, a.prot, a.flags, a.offset); @@ -349,7 +344,7 @@ return(sys_mprotect(start & PAGE_MASK, len & PAGE_MASK, prot)); } -asmlinkage int +asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 *act, struct sigaction32 *oact, unsigned int sigsetsize) { @@ -408,10 +403,10 @@ } -extern asmlinkage int sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, - size_t sigsetsize); +extern asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, + size_t sigsetsize); -asmlinkage int +asmlinkage long sys32_rt_sigprocmask(int how, sigset32_t *set, sigset32_t *oset, unsigned int sigsetsize) { @@ -466,9 +461,9 @@ return err; } -extern asmlinkage int sys_statfs(const char * path, struct statfs * buf); +extern asmlinkage long sys_statfs(const char * path, struct statfs * buf); -asmlinkage int +asmlinkage long sys32_statfs(const char * path, struct statfs32 *buf) { int ret; @@ -483,9 +478,9 @@ return ret; } -extern asmlinkage int sys_fstatfs(unsigned int fd, struct statfs * buf); +extern asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf); -asmlinkage int +asmlinkage long sys32_fstatfs(unsigned int fd, struct statfs32 *buf) { int ret; @@ -552,7 +547,7 @@ extern int do_getitimer(int which, struct itimerval *value); -asmlinkage int +asmlinkage long sys32_getitimer(int which, struct itimerval32 *it) { struct itimerval kit; @@ -567,7 +562,7 @@ extern int do_setitimer(int which, struct itimerval *, struct itimerval *); -asmlinkage int +asmlinkage long sys32_setitimer(int which, struct itimerval32 *in, struct itimerval32 *out) { struct itimerval kin, kout; @@ -612,7 +607,7 @@ extern struct timezone sys_tz; extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz); -asmlinkage int +asmlinkage long sys32_gettimeofday(struct timeval32 *tv, struct timezone *tz) { if (tv) { @@ 
-628,7 +623,7 @@ return 0; } -asmlinkage int +asmlinkage long sys32_settimeofday(struct timeval32 *tv, struct timezone *tz) { struct timeval ktv; @@ -646,56 +641,135 @@ return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL); } -struct dirent32 { - unsigned int d_ino; - unsigned int d_off; - unsigned short d_reclen; - char d_name[NAME_MAX + 1]; +struct linux32_dirent { + u32 d_ino; + u32 d_off; + u16 d_reclen; + char d_name[1]; }; -static void -xlate_dirent(void *dirent64, void *dirent32, long n) +struct old_linux32_dirent { + u32 d_ino; + u32 d_offset; + u16 d_namlen; + char d_name[1]; +}; + +struct getdents32_callback { + struct linux32_dirent * current_dir; + struct linux32_dirent * previous; + int count; + int error; +}; + +struct readdir32_callback { + struct old_linux32_dirent * dirent; + int count; +}; + +static int +filldir32 (void *__buf, const char *name, int namlen, off_t offset, ino_t ino) { - long off; - struct dirent *dirp; - struct dirent32 *dirp32; - - off = 0; - while (off < n) { - dirp = (struct dirent *)(dirent64 + off); - dirp32 = (struct dirent32 *)(dirent32 + off); - off += dirp->d_reclen; - dirp32->d_ino = dirp->d_ino; - dirp32->d_off = (unsigned int)dirp->d_off; - dirp32->d_reclen = dirp->d_reclen; - strncpy(dirp32->d_name, dirp->d_name, dirp->d_reclen - ((3 * 4) + 2)); - } - return; + struct linux32_dirent * dirent; + struct getdents32_callback * buf = (struct getdents32_callback *) __buf; + int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4); + + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; + dirent = buf->previous; + if (dirent) + put_user(offset, &dirent->d_off); + dirent = buf->current_dir; + buf->previous = dirent; + put_user(ino, &dirent->d_ino); + put_user(reclen, &dirent->d_reclen); + copy_to_user(dirent->d_name, name, namlen); + put_user(0, dirent->d_name + namlen); + ((char *) dirent) += reclen; + buf->current_dir = dirent; + buf->count -= reclen; + return 0; } asmlinkage long -sys32_getdents(unsigned int fd, void * dirent32, unsigned int count) +sys32_getdents (unsigned int fd, void * dirent, unsigned int count) { - long n; - void *dirent64; + struct file * file; + struct linux32_dirent * lastdirent; + struct getdents32_callback buf; + int error; - dirent64 = (unsigned long)(dirent32 + (sizeof(long) - 1)) & ~(sizeof(long) - 1); - if ((n = sys_getdents(fd, dirent64, count - (dirent64 - dirent32))) < 0) - return(n); - xlate_dirent(dirent64, dirent32, n); - return(n); + error = -EBADF; + file = fget(fd); + if (!file) + goto out; + + buf.current_dir = (struct linux32_dirent *) dirent; + buf.previous = NULL; + buf.count = count; + buf.error = 0; + + lock_kernel(); + error = vfs_readdir(file, filldir32, &buf); + if (error < 0) + goto out_putf; + error = buf.error; + lastdirent = buf.previous; + if (lastdirent) { + put_user(file->f_pos, &lastdirent->d_off); + error = count - buf.count; + } + +out_putf: + unlock_kernel(); + fput(file); +out: + return error; } -asmlinkage int -sys32_readdir(unsigned int fd, void * dirent32, unsigned int count) +static int +fillonedir32 (void * __buf, const char * name, int namlen, off_t offset, ino_t ino) { - int n; - struct dirent dirent64; + struct readdir32_callback * buf = (struct readdir32_callback *) __buf; + struct old_linux32_dirent * dirent; - if ((n = old_readdir(fd, &dirent64, count)) < 0) - return(n); - xlate_dirent(&dirent64, dirent32, dirent64.d_reclen); - return(n); + if (buf->count) + return -EINVAL; + buf->count++; + dirent = buf->dirent; + 
put_user(ino, &dirent->d_ino); + put_user(offset, &dirent->d_offset); + put_user(namlen, &dirent->d_namlen); + copy_to_user(dirent->d_name, name, namlen); + put_user(0, dirent->d_name + namlen); + return 0; +} + +asmlinkage long +sys32_readdir (unsigned int fd, void * dirent, unsigned int count) +{ + int error; + struct file * file; + struct readdir32_callback buf; + + error = -EBADF; + file = fget(fd); + if (!file) + goto out; + + buf.count = 0; + buf.dirent = dirent; + + lock_kernel(); + error = vfs_readdir(file, fillonedir32, &buf); + if (error >= 0) + error = buf.count; + unlock_kernel(); + + fput(file); +out: + return error; } /* @@ -708,9 +782,9 @@ */ #define MAX_SELECT_SECONDS \ ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) -#define ROUND_UP(x,y) (((x)+(y)-1)/(y)) +#define ROUND_UP_TIME(x,y) (((x)+(y)-1)/(y)) -asmlinkage int +asmlinkage long sys32_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval32 *tvp32) { fd_set_bits fds; @@ -730,7 +804,7 @@ goto out_nofds; if ((unsigned long) sec < MAX_SELECT_SECONDS) { - timeout = ROUND_UP(usec, 1000000/HZ); + timeout = ROUND_UP_TIME(usec, 1000000/HZ); timeout += sec * (unsigned long) HZ; } } @@ -807,13 +881,15 @@ unsigned int tvp; }; -asmlinkage int old_select(struct sel_arg_struct *arg) +asmlinkage long +old_select(struct sel_arg_struct *arg) { struct sel_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - return sys32_select(a.n, a.inp, a.outp, a.exp, a.tvp); + return sys32_select(a.n, (fd_set *)A(a.inp), (fd_set *)A(a.outp), (fd_set *)A(a.exp), + (struct timeval32 *)A(a.tvp)); } struct timespec32 { @@ -821,10 +897,9 @@ int tv_nsec; }; -extern asmlinkage int sys_nanosleep(struct timespec *rqtp, - struct timespec *rmtp); +extern asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp); -asmlinkage int +asmlinkage long sys32_nanosleep(struct timespec32 *rqtp, struct timespec32 *rmtp) { struct timespec t; @@ -1005,9 +1080,9 @@ int rlim_max; }; -extern 
asmlinkage int sys_getrlimit(unsigned int resource, struct rlimit *rlim); +extern asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim); -asmlinkage int +asmlinkage long sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim) { struct rlimit r; @@ -1024,9 +1099,9 @@ return ret; } -extern asmlinkage int sys_setrlimit(unsigned int resource, struct rlimit *rlim); +extern asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit *rlim); -asmlinkage int +asmlinkage long sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim) { struct rlimit r; @@ -1047,118 +1122,6 @@ return ret; } -/* Argument list sizes for sys_socketcall */ -#define AL(x) ((x) * sizeof(u32)) -static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), - AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), - AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)}; -#undef AL - -extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen); -extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr, - int addrlen); -extern asmlinkage int sys_accept(int fd, struct sockaddr *upeer_sockaddr, - int *upeer_addrlen); -extern asmlinkage int sys_getsockname(int fd, struct sockaddr *usockaddr, - int *usockaddr_len); -extern asmlinkage int sys_getpeername(int fd, struct sockaddr *usockaddr, - int *usockaddr_len); -extern asmlinkage int sys_send(int fd, void *buff, size_t len, unsigned flags); -extern asmlinkage int sys_sendto(int fd, u32 buff, __kernel_size_t32 len, - unsigned flags, u32 addr, int addr_len); -extern asmlinkage int sys_recv(int fd, void *ubuf, size_t size, unsigned flags); -extern asmlinkage int sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size, - unsigned flags, u32 addr, u32 addr_len); -extern asmlinkage int sys_setsockopt(int fd, int level, int optname, - char *optval, int optlen); -extern asmlinkage int sys_getsockopt(int fd, int level, int optname, - u32 optval, u32 optlen); - -extern asmlinkage int sys_socket(int family, int type, int protocol); 
-extern asmlinkage int sys_socketpair(int family, int type, int protocol, - int usockvec[2]); -extern asmlinkage int sys_shutdown(int fd, int how); -extern asmlinkage int sys_listen(int fd, int backlog); - -asmlinkage int sys32_socketcall(int call, u32 *args) -{ - int i, ret; - u32 a[6]; - u32 a0,a1; - - if (callSYS_RECVMSG) - return -EINVAL; - if (copy_from_user(a, args, nas[call])) - return -EFAULT; - a0=a[0]; - a1=a[1]; - - switch(call) - { - case SYS_SOCKET: - ret = sys_socket(a0, a1, a[2]); - break; - case SYS_BIND: - ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]); - break; - case SYS_CONNECT: - ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]); - break; - case SYS_LISTEN: - ret = sys_listen(a0, a1); - break; - case SYS_ACCEPT: - ret = sys_accept(a0, (struct sockaddr *)A(a1), - (int *)A(a[2])); - break; - case SYS_GETSOCKNAME: - ret = sys_getsockname(a0, (struct sockaddr *)A(a1), - (int *)A(a[2])); - break; - case SYS_GETPEERNAME: - ret = sys_getpeername(a0, (struct sockaddr *)A(a1), - (int *)A(a[2])); - break; - case SYS_SOCKETPAIR: - ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3])); - break; - case SYS_SEND: - ret = sys_send(a0, (void *)A(a1), a[2], a[3]); - break; - case SYS_SENDTO: - ret = sys_sendto(a0, a1, a[2], a[3], a[4], a[5]); - break; - case SYS_RECV: - ret = sys_recv(a0, (void *)A(a1), a[2], a[3]); - break; - case SYS_RECVFROM: - ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]); - break; - case SYS_SHUTDOWN: - ret = sys_shutdown(a0,a1); - break; - case SYS_SETSOCKOPT: - ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]), - a[4]); - break; - case SYS_GETSOCKOPT: - ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]); - break; - case SYS_SENDMSG: - ret = sys32_sendmsg(a0, (struct msghdr32 *)A(a1), - a[2]); - break; - case SYS_RECVMSG: - ret = sys32_recvmsg(a0, (struct msghdr32 *)A(a1), - a[2]); - break; - default: - ret = EINVAL; - break; - } - return ret; -} - /* * Declare the IA32 version of the msghdr */ @@ -1181,13 +1144,13 @@ if 
(!access_ok(VERIFY_READ, mp32, sizeof(*mp32))) return(-EFAULT); __get_user(i, &mp32->msg_name); - mp->msg_name = (void *)i; + mp->msg_name = (void *)A(i); __get_user(mp->msg_namelen, &mp32->msg_namelen); __get_user(i, &mp32->msg_iov); - mp->msg_iov = (struct iov *)i; + mp->msg_iov = (struct iovec *)A(i); __get_user(mp->msg_iovlen, &mp32->msg_iovlen); __get_user(i, &mp32->msg_control); - mp->msg_control = (void *)i; + mp->msg_control = (void *)A(i); __get_user(mp->msg_controllen, &mp32->msg_controllen); __get_user(mp->msg_flags, &mp32->msg_flags); return(0); @@ -1233,7 +1196,7 @@ iov32 = (struct iovec32 *)iov; for (ct = m->msg_iovlen; ct-- > 0; ) { iov[ct].iov_len = (__kernel_size_t)iov32[ct].iov_len; - iov[ct].iov_base = (void *)iov32[ct].iov_base; + iov[ct].iov_base = (void *) A(iov32[ct].iov_base); err += iov[ct].iov_len; } out: @@ -1258,7 +1221,7 @@ * BSD sendmsg interface */ -asmlinkage int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags) +int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags) { struct socket *sock; char address[MAX_SOCK_ADDR]; @@ -1337,7 +1300,8 @@ * BSD recvmsg interface */ -asmlinkage int sys32_recvmsg(int fd, struct msghdr32 *msg, unsigned int flags) +int +sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags) { struct socket *sock; struct iovec iovstack[UIO_FASTIOV]; @@ -1419,6 +1383,118 @@ return err; } +/* Argument list sizes for sys_socketcall */ +#define AL(x) ((x) * sizeof(u32)) +static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), + AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), + AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)}; +#undef AL + +extern asmlinkage long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen); +extern asmlinkage long sys_connect(int fd, struct sockaddr *uservaddr, + int addrlen); +extern asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr, + int *upeer_addrlen); +extern asmlinkage long sys_getsockname(int fd, struct sockaddr *usockaddr, + int *usockaddr_len); +extern 
asmlinkage long sys_getpeername(int fd, struct sockaddr *usockaddr, + int *usockaddr_len); +extern asmlinkage long sys_send(int fd, void *buff, size_t len, unsigned flags); +extern asmlinkage long sys_sendto(int fd, u32 buff, __kernel_size_t32 len, + unsigned flags, u32 addr, int addr_len); +extern asmlinkage long sys_recv(int fd, void *ubuf, size_t size, unsigned flags); +extern asmlinkage long sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size, + unsigned flags, u32 addr, u32 addr_len); +extern asmlinkage long sys_setsockopt(int fd, int level, int optname, + char *optval, int optlen); +extern asmlinkage long sys_getsockopt(int fd, int level, int optname, + u32 optval, u32 optlen); + +extern asmlinkage long sys_socket(int family, int type, int protocol); +extern asmlinkage long sys_socketpair(int family, int type, int protocol, + int usockvec[2]); +extern asmlinkage long sys_shutdown(int fd, int how); +extern asmlinkage long sys_listen(int fd, int backlog); + +asmlinkage long sys32_socketcall(int call, u32 *args) +{ + int ret; + u32 a[6]; + u32 a0,a1; + + if (callSYS_RECVMSG) + return -EINVAL; + if (copy_from_user(a, args, nas[call])) + return -EFAULT; + a0=a[0]; + a1=a[1]; + + switch(call) + { + case SYS_SOCKET: + ret = sys_socket(a0, a1, a[2]); + break; + case SYS_BIND: + ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]); + break; + case SYS_CONNECT: + ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]); + break; + case SYS_LISTEN: + ret = sys_listen(a0, a1); + break; + case SYS_ACCEPT: + ret = sys_accept(a0, (struct sockaddr *)A(a1), + (int *)A(a[2])); + break; + case SYS_GETSOCKNAME: + ret = sys_getsockname(a0, (struct sockaddr *)A(a1), + (int *)A(a[2])); + break; + case SYS_GETPEERNAME: + ret = sys_getpeername(a0, (struct sockaddr *)A(a1), + (int *)A(a[2])); + break; + case SYS_SOCKETPAIR: + ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3])); + break; + case SYS_SEND: + ret = sys_send(a0, (void *)A(a1), a[2], a[3]); + break; + case SYS_SENDTO: + ret = 
sys_sendto(a0, a1, a[2], a[3], a[4], a[5]); + break; + case SYS_RECV: + ret = sys_recv(a0, (void *)A(a1), a[2], a[3]); + break; + case SYS_RECVFROM: + ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]); + break; + case SYS_SHUTDOWN: + ret = sys_shutdown(a0,a1); + break; + case SYS_SETSOCKOPT: + ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]), + a[4]); + break; + case SYS_GETSOCKOPT: + ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]); + break; + case SYS_SENDMSG: + ret = sys32_sendmsg(a0, (struct msghdr32 *)A(a1), + a[2]); + break; + case SYS_RECVMSG: + ret = sys32_recvmsg(a0, (struct msghdr32 *)A(a1), + a[2]); + break; + default: + ret = EINVAL; + break; + } + return ret; +} + /* * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.. * @@ -1613,7 +1689,7 @@ static int do_sys32_msgctl (int first, int second, void *uptr) { - int err, err2; + int err = -EINVAL, err2; struct msqid_ds m; struct msqid64_ds m64; struct msqid_ds32 *up = (struct msqid_ds32 *)uptr; @@ -1644,7 +1720,7 @@ case MSG_STAT: old_fs = get_fs (); set_fs (KERNEL_DS); - err = sys_msgctl (first, second, &m64); + err = sys_msgctl (first, second, (void *) &m64); set_fs (old_fs); err2 = put_user (m64.msg_perm.key, &up->msg_perm.key); err2 |= __put_user(m64.msg_perm.uid, &up->msg_perm.uid); @@ -1725,7 +1801,7 @@ case SHM_STAT: old_fs = get_fs (); set_fs (KERNEL_DS); - err = sys_shmctl (first, second, &s64); + err = sys_shmctl (first, second, (void *) &s64); set_fs (old_fs); if (err < 0) break; @@ -1753,7 +1829,7 @@ case SHM_INFO: old_fs = get_fs (); set_fs (KERNEL_DS); - err = sys_shmctl (first, second, &si); + err = sys_shmctl (first, second, (void *)&si); set_fs (old_fs); if (err < 0) break; @@ -1773,7 +1849,7 @@ return err; } -asmlinkage int +asmlinkage long sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) { int version, err; @@ -1898,10 +1974,10 @@ return err; } -extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, +extern asmlinkage 
long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru); -asmlinkage int +asmlinkage long sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options, struct rusage32 *ru) { @@ -1923,17 +1999,17 @@ } } -asmlinkage int +asmlinkage long sys32_waitpid(__kernel_pid_t32 pid, unsigned int *stat_addr, int options) { return sys32_wait4(pid, stat_addr, options, NULL); } -extern asmlinkage int +extern asmlinkage long sys_getrusage(int who, struct rusage *ru); -asmlinkage int +asmlinkage long sys32_getrusage(int who, struct rusage32 *ru) { struct rusage r; @@ -2429,9 +2505,9 @@ /* 32-bit timeval and related flotsam. */ -extern asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on); +extern asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); -asmlinkage int +asmlinkage long sys32_ioperm(u32 from, u32 num, int on) { return sys_ioperm((unsigned long)from, (unsigned long)num, on); @@ -2503,10 +2579,10 @@ __kernel_time_t32 dqb_itime; }; -extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, +extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr); -asmlinkage int +asmlinkage long sys32_quotactl(int cmd, const char *special, int id, unsigned long addr) { int cmds = cmd >> SUBCMDSHIFT; @@ -2550,13 +2626,13 @@ return err; } -extern asmlinkage int sys_utime(char * filename, struct utimbuf * times); +extern asmlinkage long sys_utime(char * filename, struct utimbuf * times); struct utimbuf32 { __kernel_time_t32 actime, modtime; }; -asmlinkage int +asmlinkage long sys32_utime(char * filename, struct utimbuf32 *times) { struct utimbuf t; @@ -2640,10 +2716,10 @@ __put_user(*fdset, ufdset); } -extern asmlinkage int sys_sysfs(int option, unsigned long arg1, +extern asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2); -asmlinkage int +asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2) { return sys_sysfs(option, arg1, arg2); @@ 
-2739,7 +2815,7 @@ #define SMBFS_NAME "smbfs" #define NCPFS_NAME "ncpfs" -asmlinkage int +asmlinkage long sys32_mount(char *dev_name, char *dir_name, char *type, unsigned long new_flags, u32 data) { @@ -2813,9 +2889,9 @@ char _f[22]; }; -extern asmlinkage int sys_sysinfo(struct sysinfo *info); +extern asmlinkage long sys_sysinfo(struct sysinfo *info); -asmlinkage int +asmlinkage long sys32_sysinfo(struct sysinfo32 *info) { struct sysinfo s; @@ -2841,10 +2917,10 @@ return ret; } -extern asmlinkage int sys_sched_rr_get_interval(pid_t pid, +extern asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval); -asmlinkage int +asmlinkage long sys32_sched_rr_get_interval(__kernel_pid_t32 pid, struct timespec32 *interval) { struct timespec t; @@ -2860,10 +2936,10 @@ return ret; } -extern asmlinkage int sys_sigprocmask(int how, old_sigset_t *set, +extern asmlinkage long sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset); -asmlinkage int +asmlinkage long sys32_sigprocmask(int how, old_sigset_t32 *set, old_sigset_t32 *oset) { old_sigset_t s; @@ -2879,9 +2955,9 @@ return 0; } -extern asmlinkage int sys_sigpending(old_sigset_t *set); +extern asmlinkage long sys_sigpending(old_sigset_t *set); -asmlinkage int +asmlinkage long sys32_sigpending(old_sigset_t32 *set) { old_sigset_t s; @@ -2895,9 +2971,9 @@ return ret; } -extern asmlinkage int sys_rt_sigpending(sigset_t *set, size_t sigsetsize); +extern asmlinkage long sys_rt_sigpending(sigset_t *set, size_t sigsetsize); -asmlinkage int +asmlinkage long sys32_rt_sigpending(sigset_t32 *set, __kernel_size_t32 sigsetsize) { sigset_t s; @@ -3000,11 +3076,11 @@ return d; } -extern asmlinkage int +extern asmlinkage long sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo, const struct timespec *uts, size_t sigsetsize); -asmlinkage int +asmlinkage long sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, struct timespec32 *uts, __kernel_size_t32 sigsetsize) { @@ -3041,10 +3117,10 @@ return 
ret; } -extern asmlinkage int +extern asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo); -asmlinkage int +asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo) { siginfo_t info; @@ -3062,9 +3138,9 @@ return ret; } -extern asmlinkage int sys_setreuid(uid_t ruid, uid_t euid); +extern asmlinkage long sys_setreuid(uid_t ruid, uid_t euid); -asmlinkage int sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid) +asmlinkage long sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid) { uid_t sruid, seuid; @@ -3073,9 +3149,9 @@ return sys_setreuid(sruid, seuid); } -extern asmlinkage int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); +extern asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); -asmlinkage int +asmlinkage long sys32_setresuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid, __kernel_uid_t32 suid) { @@ -3087,9 +3163,9 @@ return sys_setresuid(sruid, seuid, ssuid); } -extern asmlinkage int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); +extern asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); -asmlinkage int +asmlinkage long sys32_getresuid(__kernel_uid_t32 *ruid, __kernel_uid_t32 *euid, __kernel_uid_t32 *suid) { @@ -3105,9 +3181,9 @@ return ret; } -extern asmlinkage int sys_setregid(gid_t rgid, gid_t egid); +extern asmlinkage long sys_setregid(gid_t rgid, gid_t egid); -asmlinkage int +asmlinkage long sys32_setregid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid) { gid_t srgid, segid; @@ -3117,9 +3193,9 @@ return sys_setregid(srgid, segid); } -extern asmlinkage int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); +extern asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); -asmlinkage int +asmlinkage long sys32_setresgid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid, __kernel_gid_t32 sgid) { @@ -3131,9 +3207,9 @@ return sys_setresgid(srgid, segid, ssgid); } -extern asmlinkage int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); +extern asmlinkage 
long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); -asmlinkage int +asmlinkage long sys32_getresgid(__kernel_gid_t32 *rgid, __kernel_gid_t32 *egid, __kernel_gid_t32 *sgid) { @@ -3152,9 +3228,9 @@ return ret; } -extern asmlinkage int sys_getgroups(int gidsetsize, gid_t *grouplist); +extern asmlinkage long sys_getgroups(int gidsetsize, gid_t *grouplist); -asmlinkage int +asmlinkage long sys32_getgroups(int gidsetsize, __kernel_gid_t32 *grouplist) { gid_t gl[NGROUPS]; @@ -3171,9 +3247,9 @@ return ret; } -extern asmlinkage int sys_setgroups(int gidsetsize, gid_t *grouplist); +extern asmlinkage long sys_setgroups(int gidsetsize, gid_t *grouplist); -asmlinkage int +asmlinkage long sys32_setgroups(int gidsetsize, __kernel_gid_t32 *grouplist) { gid_t gl[NGROUPS]; @@ -3617,7 +3693,7 @@ kmsg->msg_control = (void *) orig_cmsg_uptr; } -asmlinkage int +asmlinkage long sys32_sendmsg(int fd, struct msghdr32 *user_msg, unsigned user_flags) { struct socket *sock; @@ -3665,7 +3741,7 @@ return err; } -asmlinkage int +asmlinkage long sys32_recvmsg(int fd, struct msghdr32 *user_msg, unsigned int user_flags) { struct iovec iovstack[UIO_FASTIOV]; @@ -3756,7 +3832,7 @@ extern void check_pending(int signum); -asmlinkage int +asmlinkage long sys32_sigaction (int sig, struct old_sigaction32 *act, struct old_sigaction32 *oact) { @@ -3801,21 +3877,21 @@ return sys_create_module(name_user, (size_t)size); } -extern asmlinkage int sys_init_module(const char *name_user, +extern asmlinkage long sys_init_module(const char *name_user, struct module *mod_user); /* Hey, when you're trying to init module, take time and prepare us a nice 64bit * module structure, even if from 32bit modutils... Why to pollute kernel... 
:)) */ -asmlinkage int +asmlinkage long sys32_init_module(const char *name_user, struct module *mod_user) { return sys_init_module(name_user, mod_user); } -extern asmlinkage int sys_delete_module(const char *name_user); +extern asmlinkage long sys_delete_module(const char *name_user); -asmlinkage int +asmlinkage long sys32_delete_module(const char *name_user) { return sys_delete_module(name_user); @@ -4090,7 +4166,7 @@ return error; } -asmlinkage int +asmlinkage long sys32_query_module(char *name_user, int which, char *buf, __kernel_size_t32 bufsize, u32 ret) { @@ -4158,9 +4234,9 @@ char name[60]; }; -extern asmlinkage int sys_get_kernel_syms(struct kernel_sym *table); +extern asmlinkage long sys_get_kernel_syms(struct kernel_sym *table); -asmlinkage int +asmlinkage long sys32_get_kernel_syms(struct kernel_sym32 *table) { int len, i; @@ -4192,19 +4268,19 @@ return -ENOSYS; } -asmlinkage int +asmlinkage long sys32_init_module(const char *name_user, struct module *mod_user) { return -ENOSYS; } -asmlinkage int +asmlinkage long sys32_delete_module(const char *name_user) { return -ENOSYS; } -asmlinkage int +asmlinkage long sys32_query_module(const char *name_user, int which, char *buf, size_t bufsize, size_t *ret) { @@ -4216,7 +4292,7 @@ return -ENOSYS; } -asmlinkage int +asmlinkage long sys32_get_kernel_syms(struct kernel_sym *table) { return -ENOSYS; @@ -4432,7 +4508,7 @@ return err; } -extern asmlinkage int sys_nfsservctl(int cmd, void *arg, void *resp); +extern asmlinkage long sys_nfsservctl(int cmd, void *arg, void *resp); int asmlinkage sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32) @@ -4503,9 +4579,9 @@ return err; } -asmlinkage int sys_utimes(char *, struct timeval *); +asmlinkage long sys_utimes(char *, struct timeval *); -asmlinkage int +asmlinkage long sys32_utimes(char *filename, struct timeval32 *tvs) { char *kfilename; @@ -4533,7 +4609,7 @@ } /* These are here just in case some old ia32 binary calls it. 
*/ -asmlinkage int +asmlinkage long sys32_pause(void) { current->state = TASK_INTERRUPTIBLE; @@ -4542,19 +4618,19 @@ } /* PCI config space poking. */ -extern asmlinkage int sys_pciconfig_read(unsigned long bus, +extern asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, unsigned char *buf); -extern asmlinkage int sys_pciconfig_write(unsigned long bus, +extern asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, unsigned char *buf); -asmlinkage int +asmlinkage long sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) { return sys_pciconfig_read((unsigned long) bus, @@ -4564,7 +4640,7 @@ (unsigned char *)AA(ubuf)); } -asmlinkage int +asmlinkage long sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) { return sys_pciconfig_write((unsigned long) bus, @@ -4574,11 +4650,11 @@ (unsigned char *)AA(ubuf)); } -extern asmlinkage int sys_prctl(int option, unsigned long arg2, +extern asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); -asmlinkage int +asmlinkage long sys32_prctl(int option, u32 arg2, u32 arg3, u32 arg4, u32 arg5) { return sys_prctl(option, @@ -4589,9 +4665,9 @@ } -extern asmlinkage int sys_newuname(struct new_utsname * name); +extern asmlinkage long sys_newuname(struct new_utsname * name); -asmlinkage int +asmlinkage long sys32_newuname(struct new_utsname * name) { int ret = sys_newuname(name); @@ -4627,9 +4703,9 @@ } -extern asmlinkage int sys_personality(unsigned long); +extern asmlinkage long sys_personality(unsigned long); -asmlinkage int +asmlinkage long sys32_personality(unsigned long personality) { int ret; @@ -4646,7 +4722,7 @@ extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count); -asmlinkage int +asmlinkage long sys32_sendfile(int out_fd, int in_fd, __kernel_off_t32 *offset, s32 count) { mm_segment_t 
old_fs = get_fs(); @@ -4683,7 +4759,7 @@ extern int do_adjtimex(struct timex *); -asmlinkage int +asmlinkage long sys32_adjtimex(struct timex32 *utp) { struct timex txc; diff -urN linux-2.4.0-test1/arch/ia64/kernel/Makefile linux-2.4.0-test1-lia/arch/ia64/kernel/Makefile --- linux-2.4.0-test1/arch/ia64/kernel/Makefile Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/Makefile Thu Jun 1 01:00:52 2000 @@ -1,11 +1,6 @@ # # Makefile for the linux kernel. # -# Note! Dependencies are done automagically by 'make dep', which also -# removes any old dependencies. DON'T put your own dependencies here -# unless it's something special (ie not a .c file). -# -# Note 2! The CFLAGS definitions are now in the main makefile... .S.s: $(CPP) $(AFLAGS) -o $*.s $< @@ -15,16 +10,19 @@ all: kernel.o head.o init_task.o O_TARGET := kernel.o -O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \ - pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o \ +O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \ + pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o \ signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o -#O_OBJS := fpreg.o -#OX_OBJS := ia64_ksyms.o +OX_OBJS := ia64_ksyms.o ifdef CONFIG_IA64_GENERIC O_OBJS += machvec.o endif +ifdef CONFIG_IA64_PALINFO +O_OBJS += palinfo.o +endif + ifdef CONFIG_PCI O_OBJS += pci.o endif @@ -35,6 +33,10 @@ ifdef CONFIG_IA64_MCA O_OBJS += mca.o mca_asm.o +endif + +ifdef CONFIG_IA64_BRL_EMU +O_OBJS += brl_emu.o endif clean:: diff -urN linux-2.4.0-test1/arch/ia64/kernel/acpi.c linux-2.4.0-test1-lia/arch/ia64/kernel/acpi.c --- linux-2.4.0-test1/arch/ia64/kernel/acpi.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/acpi.c Thu May 25 22:56:31 2000 @@ -89,16 +89,16 @@ #ifdef CONFIG_IA64_DIG acpi_entry_iosapic_t *iosapic = (acpi_entry_iosapic_t *) p; unsigned int ver, v; - int l, pins; + int l, max_pin; ver 
= iosapic_version(iosapic->address); - pins = (ver >> 16) & 0xff; + max_pin = (ver >> 16) & 0xff; printk("IOSAPIC Version %x.%x: address 0x%lx IRQs 0x%x - 0x%x\n", (ver & 0xf0) >> 4, (ver & 0x0f), iosapic->address, - iosapic->irq_base, iosapic->irq_base + pins); + iosapic->irq_base, iosapic->irq_base + max_pin); - for (l = 0; l < pins; l++) { + for (l = 0; l <= max_pin; l++) { v = iosapic->irq_base + l; if (v < 16) v = isa_irq_to_vector(v); @@ -110,7 +110,7 @@ iosapic_addr(v) = (unsigned long) ioremap(iosapic->address, 0); iosapic_baseirq(v) = iosapic->irq_base; } - iosapic_init(iosapic->address); + iosapic_init(iosapic->address, iosapic->irq_base); #endif } diff -urN linux-2.4.0-test1/arch/ia64/kernel/brl_emu.c linux-2.4.0-test1-lia/arch/ia64/kernel/brl_emu.c --- linux-2.4.0-test1/arch/ia64/kernel/brl_emu.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/brl_emu.c Thu Jun 1 01:01:03 2000 @@ -0,0 +1,220 @@ +/* + * Emulation of the "brl" instruction for IA64 processors that + * don't support it in hardware. + * Author: Stephan Zeisset, Intel Corp. + */ + +#include +#include +#include +#include + +extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5; + +struct illegal_op_return { + unsigned long fkt, arg1, arg2, arg3; +}; + +/* + * The unimplemented bits of a virtual address must be set + * to the value of the most significant implemented bit. + * unimpl_va_mask includes all unimplemented bits and + * the most significant implemented bit, so the result + * of an and operation with the mask must be all 0's + * or all 1's for the address to be valid. + */ +#define unimplemented_virtual_address(va) ( \ + ((va) & my_cpu_data.unimpl_va_mask) != 0 && \ + ((va) & my_cpu_data.unimpl_va_mask) != my_cpu_data.unimpl_va_mask \ +) + +/* + * The unimplemented bits of a physical address must be 0. 
+ * unimpl_pa_mask includes all unimplemented bits, so the result + * of an and operation with the mask must be all 0's for the + * address to be valid. + */ +#define unimplemented_physical_address(pa) ( \ + ((pa) & my_cpu_data.unimpl_pa_mask) != 0 \ +) + +/* + * Handle an illegal operation fault that was caused by an + * unimplemented "brl" instruction. + * If we are not successful (e.g because the illegal operation + * wasn't caused by a "brl" after all), we return -1. + * If we are successful, we return either 0 or the address + * of a "fixup" function for manipulating preserved register + * state. + */ + +struct illegal_op_return +ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) +{ + unsigned long bundle[2]; + unsigned long opcode, btype, qp, offset; + unsigned long next_ip; + struct siginfo siginfo; + struct illegal_op_return rv; + int tmp_taken, unimplemented_address; + + rv.fkt = (unsigned long) -1; + + /* + * Decode the instruction bundle. + */ + + if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle))) + return rv; + + next_ip = (unsigned long) regs->cr_iip + 16; + + /* "brl" must be in slot 2. */ + if (ia64_psr(regs)->ri != 1) return rv; + + /* Must be "mlx" template */ + if ((bundle[0] & 0x1e) != 0x4) return rv; + + opcode = (bundle[1] >> 60); + btype = ((bundle[1] >> 29) & 0x7); + qp = ((bundle[1] >> 23) & 0x3f); + offset = ((bundle[1] & 0x0800000000000000L) << 4) + | ((bundle[1] & 0x00fffff000000000L) >> 32) + | ((bundle[1] & 0x00000000007fffffL) << 40) + | ((bundle[0] & 0xffff000000000000L) >> 24); + + tmp_taken = regs->pr & (1L << qp); + + switch(opcode) { + + case 0xC: + /* + * Long Branch. + */ + if (btype != 0) return rv; + rv.fkt = 0; + if (!(tmp_taken)) { + /* + * Qualifying predicate is 0. + * Skip instruction. + */ + regs->cr_iip = next_ip; + ia64_psr(regs)->ri = 0; + return rv; + } + break; + + case 0xD: + /* + * Long Call. + */ + rv.fkt = 0; + if (!(tmp_taken)) { + /* + * Qualifying predicate is 0. 
+ * Skip instruction. + */ + regs->cr_iip = next_ip; + ia64_psr(regs)->ri = 0; + return rv; + } + + /* + * BR[btype] = IP+16 + */ + switch(btype) { + case 0: + regs->b0 = next_ip; + break; + case 1: + rv.fkt = (unsigned long) &ia64_set_b1; + break; + case 2: + rv.fkt = (unsigned long) &ia64_set_b2; + break; + case 3: + rv.fkt = (unsigned long) &ia64_set_b3; + break; + case 4: + rv.fkt = (unsigned long) &ia64_set_b4; + break; + case 5: + rv.fkt = (unsigned long) &ia64_set_b5; + break; + case 6: + regs->b6 = next_ip; + break; + case 7: + regs->b7 = next_ip; + break; + } + rv.arg1 = next_ip; + + /* + * AR[PFS].pfm = CFM + * AR[PFS].pec = AR[EC] + * AR[PFS].ppl = PSR.cpl + */ + regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff) + | (ar_ec << 52) + | ((unsigned long) ia64_psr(regs)->cpl << 62)); + + /* + * CFM.sof -= CFM.sol + * CFM.sol = 0 + * CFM.sor = 0 + * CFM.rrb.gr = 0 + * CFM.rrb.fr = 0 + * CFM.rrb.pr = 0 + */ + regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f) + - ((regs->cr_ifs >> 7) & 0x7f)); + + break; + + default: + /* + * Unknown opcode. + */ + return rv; + + } + + regs->cr_iip += offset; + ia64_psr(regs)->ri = 0; + + if (ia64_psr(regs)->it == 0) + unimplemented_address = unimplemented_physical_address(regs->cr_iip); + else + unimplemented_address = unimplemented_virtual_address(regs->cr_iip); + + if (unimplemented_address) { + /* + * The target address contains unimplemented bits. + */ + printk("Woah! Unimplemented Instruction Address Trap!\n"); + siginfo.si_signo = SIGILL; + siginfo.si_errno = 0; + siginfo.si_code = ILL_BADIADDR; + force_sig_info(SIGILL, &siginfo, current); + } else if (ia64_psr(regs)->tb) { + /* + * Branch Tracing is enabled. + * Force a taken branch signal. + */ + siginfo.si_signo = SIGTRAP; + siginfo.si_errno = 0; + siginfo.si_code = TRAP_BRANCH; + force_sig_info(SIGTRAP, &siginfo, current); + } else if (ia64_psr(regs)->ss) { + /* + * Single Step is enabled. + * Force a trace signal. 
+ */ + siginfo.si_signo = SIGTRAP; + siginfo.si_errno = 0; + siginfo.si_code = TRAP_TRACE; + force_sig_info(SIGTRAP, &siginfo, current); + } + return rv; +} diff -urN linux-2.4.0-test1/arch/ia64/kernel/efi.c linux-2.4.0-test1-lia/arch/ia64/kernel/efi.c --- linux-2.4.0-test1/arch/ia64/kernel/efi.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/efi.c Thu Jun 1 01:01:16 2000 @@ -5,9 +5,9 @@ * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999 Hewlett-Packard Co. + * Copyright (C) 1999-2000 Hewlett-Packard Co. * Copyright (C) 1999 David Mosberger-Tang - * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 1999-2000 Stephane Eranian * * All EFI Runtime Services are not implemented yet as EFI only * supports physical mode addressing on SoftSDV. This is to be fixed @@ -22,6 +22,7 @@ #include #include +#include #include #define EFI_DEBUG 0 @@ -207,6 +208,61 @@ } } +/* + * Look for the PAL_CODE region reported by EFI and maps it using an + * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor + * Abstraction Layer chapter 11 in ADAG + */ +static void +map_pal_code (void) +{ + void *efi_map_start, *efi_map_end, *p; + efi_memory_desc_t *md; + u64 efi_desc_size; + int pal_code_count=0; + u64 mask, flags; + u64 vaddr; + + efi_map_start = __va(ia64_boot_param.efi_memmap); + efi_map_end = efi_map_start + ia64_boot_param.efi_memmap_size; + efi_desc_size = ia64_boot_param.efi_memdesc_size; + + for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { + md = p; + if (md->type != EFI_PAL_CODE) continue; + + if (++pal_code_count > 1) { + printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n", + md->phys_addr); + continue; + } + mask = ~((1 << _PAGE_SIZE_4M)-1); /* XXX should be dynamic? 
*/ + vaddr = PAGE_OFFSET + md->phys_addr; + + printk(__FUNCTION__": mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n", + md->phys_addr, md->phys_addr + (md->num_pages << 12), + vaddr & mask, (vaddr & mask) + 4*1024*1024); + + /* + * Cannot write to CRx with PSR.ic=1 + */ + ia64_clear_ic(flags); + + /* + * ITR0/DTR0: used for kernel code/data + * ITR1/DTR1: used by HP simulator + * ITR2/DTR2: map PAL code + * ITR3/DTR3: used to map PAL calls buffer + */ + ia64_itr(0x1, 2, vaddr & mask, + pte_val(mk_pte_phys(md->phys_addr, + __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX))), + _PAGE_SIZE_4M); + local_irq_restore(flags); + ia64_srlz_i (); + } +} + void __init efi_init (void) { @@ -291,6 +347,8 @@ } } #endif + + map_pal_code(); } void diff -urN linux-2.4.0-test1/arch/ia64/kernel/efi_stub.S linux-2.4.0-test1-lia/arch/ia64/kernel/efi_stub.S --- linux-2.4.0-test1/arch/ia64/kernel/efi_stub.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/efi_stub.S Thu Jun 1 01:01:29 2000 @@ -1,7 +1,8 @@ /* * EFI call stub. * - * Copyright (C) 1999 David Mosberger + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. We need this because we can't call SetVirtualMap() until @@ -30,6 +31,7 @@ (IA64_PSR_BN) #include +#include .text .psr abi64 @@ -39,53 +41,6 @@ .text /* - * Switch execution mode from virtual to physical or vice versa. 
- * - * Inputs: - * r16 = new psr to establish - */ - .proc switch_mode -switch_mode: - { - alloc r2=ar.pfs,0,0,0,0 - rsm psr.i | psr.ic // disable interrupts and interrupt collection - mov r15=ip - } - ;; - { - flushrs // must be first insn in group - srlz.i - shr.u r19=r15,61 // r19 <- top 3 bits of current IP - } - ;; - mov cr.ipsr=r16 // set new PSR - add r3=1f-switch_mode,r15 - xor r15=0x7,r19 // flip the region bits - - mov r17=ar.bsp - mov r14=rp // get return address into a general register - - // switch RSE backing store: - ;; - dep r17=r15,r17,61,3 // make ar.bsp physical or virtual - mov r18=ar.rnat // save ar.rnat - ;; - mov ar.bspstore=r17 // this steps on ar.rnat - dep r3=r15,r3,61,3 // make rfi return address physical or virtual - ;; - mov cr.iip=r3 - mov cr.ifs=r0 - dep sp=r15,sp,61,3 // make stack pointer physical or virtual - ;; - mov ar.rnat=r18 // restore ar.rnat - dep r14=r15,r14,61,3 // make function return address physical or virtual - rfi // must be last insn in group - ;; -1: mov rp=r14 - br.ret.sptk.few rp - .endp switch_mode - -/* * Inputs: * in0 = address of function descriptor of EFI routine to call * in1..in7 = arguments to routine @@ -94,13 +49,12 @@ * r8 = EFI_STATUS returned by called function */ - .global efi_call_phys - .proc efi_call_phys -efi_call_phys: - - alloc loc0=ar.pfs,8,5,7,0 +GLOBAL_ENTRY(efi_call_phys) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,5,7,0 ld8 r2=[in0],8 // load EFI function's entry point - mov loc1=rp + mov loc0=rp + UNW(.body) ;; mov loc2=gp // save global pointer mov loc4=ar.rsc // save RSE configuration @@ -121,7 +75,7 @@ ;; andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared mov out3=in4 - br.call.sptk.few rp=switch_mode + br.call.sptk.few rp=ia64_switch_mode .ret0: mov out4=in5 mov out5=in6 @@ -130,12 +84,11 @@ .ret1: mov ar.rsc=r0 // put RSE in enforced lazy, LE mode mov r16=loc3 - br.call.sptk.few rp=switch_mode // return to virtual mode 
+ br.call.sptk.few rp=ia64_switch_mode // return to virtual mode .ret2: mov ar.rsc=loc4 // restore RSE configuration - mov ar.pfs=loc0 - mov rp=loc1 + mov ar.pfs=loc1 + mov rp=loc0 mov gp=loc2 br.ret.sptk.few rp - - .endp efi_call_phys +END(efi_call_phys) diff -urN linux-2.4.0-test1/arch/ia64/kernel/entry.S linux-2.4.0-test1-lia/arch/ia64/kernel/entry.S --- linux-2.4.0-test1/arch/ia64/kernel/entry.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/entry.S Fri Jun 9 17:09:56 2000 @@ -13,8 +13,6 @@ /* * Global (preserved) predicate usage on syscall entry/exit path: * - * - * pEOI: See entry.h. * pKern: See entry.h. * pSys: See entry.h. * pNonSys: !pSys @@ -30,6 +28,7 @@ #include #include #include +#include #include "entry.h" @@ -42,11 +41,11 @@ * execve() is special because in case of success, we need to * setup a null register window frame. */ - .align 16 - .proc ia64_execve -ia64_execve: - alloc loc0=ar.pfs,3,2,4,0 - mov loc1=rp +ENTRY(ia64_execve) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)) + alloc loc1=ar.pfs,3,2,4,0 + mov loc0=rp + UNW(.body) mov out0=in0 // filename ;; // stop bit between alloc and call mov out1=in1 // argv @@ -54,25 +53,22 @@ add out3=16,sp // regs br.call.sptk.few rp=sys_execve .ret0: cmp4.ge p6,p0=r8,r0 - mov ar.pfs=loc0 // restore ar.pfs + mov ar.pfs=loc1 // restore ar.pfs ;; (p6) mov ar.pfs=r0 // clear ar.pfs in case of success sxt4 r8=r8 // return 64-bit result - mov rp=loc1 + mov rp=loc0 br.ret.sptk.few rp - .endp ia64_execve +END(ia64_execve) - .align 16 - .global sys_clone - .proc sys_clone -sys_clone: +GLOBAL_ENTRY(sys_clone) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)) alloc r16=ar.pfs,2,2,3,0;; - movl r28=1f - mov loc1=rp - br.cond.sptk.many save_switch_stack -1: - mov loc0=r16 // save ar.pfs across do_fork + mov loc0=rp + DO_SAVE_SWITCH_STACK + mov loc1=r16 // save ar.pfs across do_fork + UNW(.body) adds out2=IA64_SWITCH_STACK_SIZE+16,sp adds 
r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp cmp.eq p8,p9=in1,r0 // usp == 0? @@ -82,24 +78,22 @@ (p9) mov out1=in1 br.call.sptk.few rp=do_fork .ret1: - mov ar.pfs=loc0 + mov ar.pfs=loc1 + UNW(.restore sp) adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack - mov rp=loc1 + mov rp=loc0 ;; br.ret.sptk.many rp - .endp sys_clone +END(sys_clone) /* - * prev_task <- switch_to(struct task_struct *next) + * prev_task <- ia64_switch_to(struct task_struct *next) */ - .align 16 - .global ia64_switch_to - .proc ia64_switch_to -ia64_switch_to: +GLOBAL_ENTRY(ia64_switch_to) + UNW(.prologue) alloc r16=ar.pfs,1,0,0,0 - movl r28=1f - br.cond.sptk.many save_switch_stack -1: + DO_SAVE_SWITCH_STACK + UNW(.body) // disable interrupts to ensure atomicity for next few instructions: mov r17=psr // M-unit ;; @@ -123,66 +117,60 @@ mov psr.l=r17 ;; srlz.d - - movl r28=1f - br.cond.sptk.many load_switch_stack -1: + DO_LOAD_SWITCH_STACK( ) br.ret.sptk.few rp - .endp ia64_switch_to +END(ia64_switch_to) +#ifndef CONFIG_IA64_NEW_UNWIND /* * Like save_switch_stack, but also save the stack frame that is active * at the time this function is called. */ - .align 16 - .proc save_switch_stack_with_current_frame -save_switch_stack_with_current_frame: -1: { - alloc r16=ar.pfs,0,0,0,0 // pass ar.pfs to save_switch_stack - mov r28=ip - } - ;; - adds r28=1f-1b,r28 - br.cond.sptk.many save_switch_stack -1: br.ret.sptk.few rp - .endp save_switch_stack_with_current_frame +ENTRY(save_switch_stack_with_current_frame) + UNW(.prologue) + alloc r16=ar.pfs,0,0,0,0 // pass ar.pfs to save_switch_stack + DO_SAVE_SWITCH_STACK + br.ret.sptk.few rp +END(save_switch_stack_with_current_frame) +#endif /* !CONFIG_IA64_NEW_UNWIND */ + /* * Note that interrupts are enabled during save_switch_stack and * load_switch_stack. This means that we may get an interrupt with * "sp" pointing to the new kernel stack while ar.bspstore is still * pointing to the old kernel backing store area. 
Since ar.rsc, * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, - * this is not a problem. + * this is not a problem. Also, we don't need to specify unwind + * information for preserved registers that are not modified in + * save_switch_stack as the right unwind information is already + * specified at the call-site of save_switch_stack. */ /* * save_switch_stack: * - r16 holds ar.pfs - * - r28 holds address to return to + * - b7 holds address to return to * - rp (b0) holds return address to save */ - .align 16 - .global save_switch_stack - .proc save_switch_stack -save_switch_stack: +GLOBAL_ENTRY(save_switch_stack) + UNW(.prologue) + UNW(.altrp b7) flushrs // flush dirty regs to backing store (must be first in insn group) mov r17=ar.unat // preserve caller's - adds r2=-IA64_SWITCH_STACK_SIZE+16,sp // r2 = &sw->caller_unat + adds r2=16,sp // r2 = &sw->caller_unat ;; mov r18=ar.fpsr // preserve fpsr mov ar.rsc=r0 // put RSE in mode: enforced lazy, little endian, pl 0 ;; mov r19=ar.rnat - adds r3=-IA64_SWITCH_STACK_SIZE+24,sp // r3 = &sw->ar_fpsr - - // Note: the instruction ordering is important here: we can't - // store anything to the switch stack before sp is updated - // as otherwise an interrupt might overwrite the memory! 
- adds sp=-IA64_SWITCH_STACK_SIZE,sp + adds r3=24,sp // r3 = &sw->ar_fpsr ;; + .savesp ar.unat,SW(CALLER_UNAT) st8 [r2]=r17,16 + .savesp ar.fpsr,SW(AR_FPSR) st8 [r3]=r18,24 ;; + UNW(.body) stf.spill [r2]=f2,32 stf.spill [r3]=f3,32 mov r21=b0 @@ -259,16 +247,17 @@ st8 [r3]=r21 // save predicate registers mov ar.rsc=3 // put RSE back into eager mode, pl 0 br.cond.sptk.few b7 - .endp save_switch_stack +END(save_switch_stack) /* * load_switch_stack: - * - r28 holds address to return to + * - b7 holds address to return to */ - .align 16 - .proc load_switch_stack -load_switch_stack: +ENTRY(load_switch_stack) + UNW(.prologue) + UNW(.altrp b7) invala // invalidate ALAT + UNW(.body) adds r2=IA64_SWITCH_STACK_B0_OFFSET+16,sp // get pointer to switch_stack.b0 mov ar.rsc=r0 // put RSE into enforced lazy mode adds r3=IA64_SWITCH_STACK_B0_OFFSET+24,sp // get pointer to switch_stack.b1 @@ -353,21 +342,16 @@ ;; ld8.fill r4=[r2],16 ld8.fill r5=[r3],16 - mov b7=r28 ;; ld8.fill r6=[r2],16 ld8.fill r7=[r3],16 mov ar.unat=r18 // restore caller's unat mov ar.fpsr=r19 // restore fpsr mov ar.rsc=3 // put RSE back into eager mode, pl 0 - adds sp=IA64_SWITCH_STACK_SIZE,sp // pop switch_stack br.cond.sptk.few b7 - .endp load_switch_stack +END(load_switch_stack) - .align 16 - .global __ia64_syscall - .proc __ia64_syscall -__ia64_syscall: +GLOBAL_ENTRY(__ia64_syscall) .regstk 6,0,0,0 mov r15=in5 // put syscall number in place break __BREAK_SYSCALL @@ -377,30 +361,42 @@ (p6) st4 [r2]=r8 (p6) mov r8=-1 br.ret.sptk.few rp - .endp __ia64_syscall +END(__ia64_syscall) // // We invoke syscall_trace through this intermediate function to // ensure that the syscall input arguments are not clobbered. We // also use it to preserve b6, which contains the syscall entry point. 
// - .align 16 - .global invoke_syscall_trace - .proc invoke_syscall_trace -invoke_syscall_trace: - alloc loc0=ar.pfs,8,3,0,0 +GLOBAL_ENTRY(invoke_syscall_trace) +#ifdef CONFIG_IA64_NEW_UNWIND + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,3,0,0 + mov loc0=rp + UNW(.body) + mov loc2=b6 + ;; + br.call.sptk.few rp=syscall_trace +.ret3: mov rp=loc0 + mov ar.pfs=loc1 + mov b6=loc2 + br.ret.sptk.few rp +#else /* !CONFIG_IA64_NEW_SYSCALL */ + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,3,0,0 ;; // WAW on CFM at the br.call - mov loc1=rp + mov loc0=rp br.call.sptk.many rp=save_switch_stack_with_current_frame // must preserve b6!! .ret2: mov loc2=b6 br.call.sptk.few rp=syscall_trace .ret3: adds sp=IA64_SWITCH_STACK_SIZE,sp // drop switch_stack frame - mov rp=loc1 - mov ar.pfs=loc0 + mov rp=loc0 + mov ar.pfs=loc1 mov b6=loc2 ;; br.ret.sptk.few rp - .endp invoke_syscall_trace +#endif /* !CONFIG_IA64_NEW_SYSCALL */ +END(invoke_syscall_trace) // // Invoke a system call, but do some tracing before and after the call. 
@@ -414,19 +410,19 @@ // .global ia64_trace_syscall .global ia64_strace_leave_kernel - .global ia64_strace_clear_r8 - .proc ia64_strace_clear_r8 -ia64_strace_clear_r8: // this is where we return after cloning when PF_TRACESYS is on +GLOBAL_ENTRY(ia64_strace_clear_r8) + // this is where we return after cloning when PF_TRACESYS is on + PT_REGS_UNWIND_INFO # ifdef CONFIG_SMP br.call.sptk.few rp=invoke_schedule_tail # endif mov r8=0 br strace_check_retval - .endp ia64_strace_clear_r8 +END(ia64_strace_clear_r8) - .proc ia64_trace_syscall -ia64_trace_syscall: +ENTRY(ia64_trace_syscall) + PT_REGS_UNWIND_INFO br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args .ret4: br.call.sptk.few rp=b6 // do the syscall strace_check_retval: @@ -454,7 +450,7 @@ (p6) mov r10=-1 (p6) mov r8=r9 br.cond.sptk.few strace_save_retval - .endp ia64_trace_syscall +END(ia64_trace_syscall) /* * A couple of convenience macros to help implement/understand the state @@ -472,12 +468,8 @@ #define rKRBS r22 #define rB6 r21 - .align 16 - .global ia64_ret_from_syscall - .global ia64_ret_from_syscall_clear_r8 - .global ia64_leave_kernel - .proc ia64_ret_from_syscall -ia64_ret_from_syscall_clear_r8: +GLOBAL_ENTRY(ia64_ret_from_syscall_clear_r8) + PT_REGS_UNWIND_INFO #ifdef CONFIG_SMP // In SMP mode, we need to call schedule_tail to complete the scheduling process. // Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the @@ -487,7 +479,10 @@ #endif mov r8=0 ;; // added stop bits to prevent r8 dependency -ia64_ret_from_syscall: +END(ia64_ret_from_syscall_clear_r8) + // fall through +GLOBAL_ENTRY(ia64_ret_from_syscall) + PT_REGS_UNWIND_INFO cmp.ge p6,p7=r8,r0 // syscall executed successfully? 
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8 adds r3=IA64_PT_REGS_R8_OFFSET+32,sp // r3 = &pt_regs.r10 @@ -497,19 +492,21 @@ .mem.offset 8,0 (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit (p7) br.cond.spnt.few handle_syscall_error // handle potential syscall failure - -ia64_leave_kernel: +END(ia64_ret_from_syscall) + // fall through +GLOBAL_ENTRY(ia64_leave_kernel) // check & deliver software interrupts: + PT_REGS_UNWIND_INFO #ifdef CONFIG_SMP - adds r2=IA64_TASK_PROCESSOR_OFFSET,r13 - movl r3=softirq_state + adds r2=IA64_TASK_PROCESSOR_OFFSET,r13 + movl r3=softirq_state ;; - ld4 r2=[r2] + ld4 r2=[r2] ;; - shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here... + shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here... ;; - add r3=r2,r3 + add r3=r2,r3 #else movl r3=softirq_state #endif @@ -538,32 +535,28 @@ ld4 r14=[r14] mov rp=r3 // arrange for schedule() to return to back_from_resched ;; - /* - * If pEOI is set, we need to write the cr.eoi now and then - * clear pEOI because both invoke_schedule() and - * handle_signal_delivery() may call the scheduler. Since - * we're returning to user-level, we get at most one nested - * interrupt of the same priority level, which doesn't tax the - * kernel stack too much. - */ -(pEOI) mov cr.eoi=r0 cmp.ne p6,p0=r2,r0 cmp.ne p2,p0=r14,r0 // NOTE: pKern is an alias for p2!! 
-(pEOI) cmp.ne pEOI,p0=r0,r0 // clear pEOI before calling schedule() srlz.d (p6) br.call.spnt.many b6=invoke_schedule // ignore return value 2: // check & deliver pending signals: (p2) br.call.spnt.few rp=handle_signal_delivery -#if defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS) +#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS) // Check for lost ticks + rsm psr.i mov r2 = ar.itc + movl r14 = 1000 // latency tolerance mov r3 = cr.itm ;; sub r2 = r2, r3 ;; + sub r2 = r2, r14 + ;; cmp.ge p6,p7 = r2, r0 (p6) br.call.spnt.few rp=invoke_ia64_reset_itm + ;; + ssm psr.i #endif restore_all: @@ -692,18 +685,6 @@ ;; add r18=r16,r18 // adjust the loadrs value ;; -#ifdef CONFIG_IA64_SOFTSDV_HACKS - // Reset ITM if we've missed a timer tick. Workaround for SoftSDV bug - mov r16 = r2 - mov r2 = ar.itc - mov r17 = cr.itm - ;; - cmp.gt p6,p7 = r2, r17 -(p6) addl r17 = 100, r2 - ;; - mov cr.itm = r17 - mov r2 = r16 -#endif dont_preserve_current_frame: alloc r16=ar.pfs,0,0,0,0 // drop the current call frame (noop for syscalls) ;; @@ -724,14 +705,14 @@ mov ar.rsc=rARRSC mov ar.unat=rARUNAT mov cr.ifs=rCRIFS // restore cr.ifs only if not a (synchronous) syscall -(pEOI) mov cr.eoi=r0 mov pr=rARPR,-1 mov cr.iip=rCRIIP mov cr.ipsr=rCRIPSR ;; rfi;; // must be last instruction in an insn group +END(ia64_leave_kernel) -handle_syscall_error: +ENTRY(handle_syscall_error) /* * Some system calls (e.g., ptrace, mmap) can return arbitrary * values which could lead us to mistake a negative return @@ -740,6 +721,7 @@ * If pt_regs.r8 is zero, we assume that the call completed * successfully. 
*/ + PT_REGS_UNWIND_INFO ld8 r3=[r2] // load pt_regs.r8 sub r9=0,r8 // negate return value to get errno ;; @@ -753,205 +735,234 @@ .mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit .mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit br.cond.sptk.many ia64_leave_kernel - .endp handle_syscall_error +END(handle_syscall_error) #ifdef CONFIG_SMP /* * Invoke schedule_tail(task) while preserving in0-in7, which may be needed * in case a system call gets restarted. */ - .proc invoke_schedule_tail -invoke_schedule_tail: - alloc loc0=ar.pfs,8,2,1,0 - mov loc1=rp +ENTRY(invoke_schedule_tail) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,2,1,0 + mov loc0=rp mov out0=r8 // Address of previous task ;; br.call.sptk.few rp=schedule_tail .ret8: - mov ar.pfs=loc0 - mov rp=loc1 + mov ar.pfs=loc1 + mov rp=loc0 br.ret.sptk.many rp - .endp invoke_schedule_tail +END(invoke_schedule_tail) + +#endif /* CONFIG_SMP */ - .proc invoke_ia64_reset_itm -invoke_ia64_reset_itm: - alloc loc0=ar.pfs,8,2,0,0 - mov loc1=rp +#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS) + +ENTRY(invoke_ia64_reset_itm) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,2,0,0 + mov loc0=rp ;; + UNW(.body) br.call.sptk.many rp=ia64_reset_itm ;; - mov ar.pfs=loc0 - mov rp=loc1 + mov ar.pfs=loc1 + mov rp=loc0 br.ret.sptk.many rp - .endp invoke_ia64_reset_itm +END(invoke_ia64_reset_itm) -#endif /* CONFIG_SMP */ +#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC || CONFIG_IA64_SOFTSDV_HACKS */ /* * Invoke do_softirq() while preserving in0-in7, which may be needed * in case a system call gets restarted. 
*/ - .proc invoke_do_softirq -invoke_do_softirq: - alloc loc0=ar.pfs,8,2,0,0 - mov loc1=rp -(pEOI) mov cr.eoi=r0 +ENTRY(invoke_do_softirq) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,2,0,0 + mov loc0=rp ;; -(pEOI) cmp.ne pEOI,p0=r0,r0 + UNW(.body) br.call.sptk.few rp=do_softirq .ret9: - mov ar.pfs=loc0 - mov rp=loc1 + mov ar.pfs=loc1 + mov rp=loc0 br.ret.sptk.many rp - .endp invoke_do_softirq +END(invoke_do_softirq) /* * Invoke schedule() while preserving in0-in7, which may be needed * in case a system call gets restarted. */ - .proc invoke_schedule -invoke_schedule: - alloc loc0=ar.pfs,8,2,0,0 - mov loc1=rp +ENTRY(invoke_schedule) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)) + alloc loc1=ar.pfs,8,2,0,0 + mov loc0=rp ;; + UNW(.body) br.call.sptk.few rp=schedule .ret10: - mov ar.pfs=loc0 - mov rp=loc1 + mov ar.pfs=loc1 + mov rp=loc0 br.ret.sptk.many rp - .endp invoke_schedule +END(invoke_schedule) // // Setup stack and call ia64_do_signal. Note that pSys and pNonSys need to // be set up by the caller. We declare 8 input registers so the system call // args get preserved, in case we need to restart a system call. // - .align 16 - .proc handle_signal_delivery -handle_signal_delivery: - alloc loc0=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! +ENTRY(handle_signal_delivery) +#ifdef CONFIG_IA64_NEW_UNWIND + .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) + alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! mov r9=ar.unat - - // If the process is being ptraced, the signal may not actually be delivered to - // the process. Instead, SIGCHLD will be sent to the parent. We need to - // setup a switch_stack so ptrace can inspect the processes state if necessary. 
- adds r2=IA64_TASK_FLAGS_OFFSET,r13 - ;; - ld8 r2=[r2] + mov loc0=rp // save return address + .body mov out0=0 // there is no "oldset" - adds out1=16,sp // out1=&pt_regs - ;; + adds out1=0,sp // out1=&sigscratch (pSys) mov out2=1 // out2==1 => we're in a syscall - tbit.nz p16,p17=r2,PF_PTRACED_BIT -(p16) br.cond.spnt.many setup_switch_stack ;; -back_from_setup_switch_stack: (pNonSys) mov out2=0 // out2==0 => not a syscall - adds r3=-IA64_SWITCH_STACK_SIZE+IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp -(p17) adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for (dummy) switch_stack - ;; -(p17) st8 [r3]=r9 // save ar.unat in sw->caller_unat - mov loc1=rp // save return address + .fframe 16 + .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) + st8 [sp]=r9,-16 // allocate space for ar.unat and save it br.call.sptk.few rp=ia64_do_signal .ret11: - adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp + .restore sp + adds sp=16,sp // pop scratch stack space ;; - ld8 r9=[r3] // load new unat from sw->caller_unat - mov rp=loc1 + ld8 r9=[sp] // load new unat from sw->caller_unat + mov rp=loc0 ;; -(p17) adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch_stack -(p17) mov ar.unat=r9 -(p17) mov ar.pfs=loc0 -(p17) br.ret.sptk.many rp - - // restore the switch stack (ptrace may have modified it): - movl r28=1f - br.cond.sptk.many load_switch_stack -1: br.ret.sptk.many rp - // NOT REACHED - -setup_switch_stack: - movl r28=back_from_setup_switch_stack - mov r16=loc0 - br.cond.sptk.many save_switch_stack - // NOT REACHED - - .endp handle_signal_delivery - - .align 16 - .proc sys_rt_sigsuspend - .global sys_rt_sigsuspend -sys_rt_sigsuspend: - alloc loc0=ar.pfs,2,2,3,0 - - // If the process is being ptraced, the signal may not actually be delivered to - // the process. Instead, SIGCHLD will be sent to the parent. We need to - // setup a switch_stack so ptrace can inspect the processes state if necessary. 
- // Also, the process might not ptraced until stopped in sigsuspend, so this - // isn't something that we can do conditionally based upon the value of - // PF_PTRACED_BIT. + mov ar.unat=r9 + mov ar.pfs=loc1 + br.ret.sptk.many rp +#else /* !CONFIG_IA64_NEW_UNWIND */ + .prologue + alloc r16=ar.pfs,8,0,3,0 // preserve all eight input regs in case of syscall restart! + DO_SAVE_SWITCH_STACK + UNW(.body) + + mov out0=0 // there is no "oldset" + adds out1=16,sp // out1=&sigscratch + .pred.rel.mutex pSys, pNonSys +(pSys) mov out2=1 // out2==1 => we're in a syscall +(pNonSys) mov out2=0 // out2==0 => not a syscall + br.call.sptk.few rp=ia64_do_signal +.ret11: + // restore the switch stack (ptrace may have modified it) + DO_LOAD_SWITCH_STACK( ) + br.ret.sptk.many rp +#endif /* !CONFIG_IA64_NEW_UNWIND */ +END(handle_signal_delivery) + +GLOBAL_ENTRY(sys_rt_sigsuspend) +#ifdef CONFIG_IA64_NEW_UNWIND + .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) + alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! + mov r9=ar.unat + mov loc0=rp // save return address mov out0=in0 // mask mov out1=in1 // sigsetsize + adds out2=0,sp // out2=&sigscratch ;; - adds out2=16,sp // out1=&pt_regs - movl r28=back_from_sigsuspend_setup_switch_stack - mov r16=loc0 - br.cond.sptk.many save_switch_stack - ;; -back_from_sigsuspend_setup_switch_stack: - mov loc1=rp // save return address - br.call.sptk.many rp=ia64_rt_sigsuspend + .fframe 16 + .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!) 
+ st8 [sp]=r9,-16 // allocate space for ar.unat and save it + .body + br.call.sptk.few rp=ia64_rt_sigsuspend .ret12: - adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp + .restore sp + adds sp=16,sp // pop scratch stack space ;; - ld8 r9=[r3] // load new unat from sw->caller_unat - mov rp=loc1 + ld8 r9=[sp] // load new unat from sw->caller_unat + mov rp=loc0 ;; + mov ar.unat=r9 + mov ar.pfs=loc1 + br.ret.sptk.many rp +#else /* !CONFIG_IA64_NEW_UNWIND */ + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)) + alloc r16=ar.pfs,2,0,3,0 + DO_SAVE_SWITCH_STACK + UNW(.body) - // restore the switch stack (ptrace may have modified it): - movl r28=1f - br.cond.sptk.many load_switch_stack -1: br.ret.sptk.many rp - // NOT REACHED - .endp sys_rt_sigsuspend - - .align 16 - .proc sys_rt_sigreturn -sys_rt_sigreturn: + mov out0=in0 // mask + mov out1=in1 // sigsetsize + adds out2=16,sp // out1=&sigscratch + br.call.sptk.many rp=ia64_rt_sigsuspend +.ret12: + // restore the switch stack (ptrace may have modified it) + DO_LOAD_SWITCH_STACK( ) + br.ret.sptk.many rp +#endif /* !CONFIG_IA64_NEW_UNWIND */ +END(sys_rt_sigsuspend) + +ENTRY(sys_rt_sigreturn) +#ifdef CONFIG_IA64_NEW_UNWIND .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler() - adds out0=16,sp // out0 = &pt_regs - adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for unat and padding + PT_REGS_UNWIND_INFO + .prologue + PT_REGS_SAVES(16) + adds sp=-16,sp + .body + cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall... + ;; + adds out0=16,sp // out0 = &sigscratch + br.call.sptk.few rp=ia64_rt_sigreturn +.ret13: + adds sp=16,sp // doesn't drop pt_regs, so don't mark it as restoring sp! 
+ PT_REGS_UNWIND_INFO // instead, create a new body section with the smaller frame + ;; + ld8 r9=[sp] // load new ar.unat + mov b7=r8 ;; + mov ar.unat=r9 + br b7 +#else /* !CONFIG_IA64_NEW_UNWIND */ + .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler() + PT_REGS_UNWIND_INFO + UNW(.prologue) + UNW(.fframe IA64_PT_REGS_SIZE+IA64_SWITCH_STACK_SIZE) + UNW(.spillsp rp, PT(CR_IIP)+IA64_SWITCH_STACK_SIZE) + UNW(.spillsp ar.pfs, PT(CR_IFS)+IA64_SWITCH_STACK_SIZE) + UNW(.spillsp ar.unat, PT(AR_UNAT)+IA64_SWITCH_STACK_SIZE) + UNW(.spillsp pr, PT(PR)+IA64_SWITCH_STACK_SIZE) + adds sp=-IA64_SWITCH_STACK_SIZE,sp cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall... + ;; + UNW(.body) + + adds out0=16,sp // out0 = &sigscratch br.call.sptk.few rp=ia64_rt_sigreturn .ret13: adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp ;; ld8 r9=[r3] // load new ar.unat - mov rp=r8 + mov b7=r8 ;; + PT_REGS_UNWIND_INFO adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch-stack frame mov ar.unat=r9 - br rp - .endp sys_rt_sigreturn + br b7 +#endif /* !CONFIG_IA64_NEW_UNWIND */ +END(sys_rt_sigreturn) - .align 16 - .global ia64_prepare_handle_unaligned - .proc ia64_prepare_handle_unaligned -ia64_prepare_handle_unaligned: - movl r28=1f +GLOBAL_ENTRY(ia64_prepare_handle_unaligned) // // r16 = fake ar.pfs, we simply need to make sure // privilege is still 0 // + PT_REGS_UNWIND_INFO mov r16=r0 - br.cond.sptk.few save_switch_stack -1: br.call.sptk.few rp=ia64_handle_unaligned // stack frame setup in ivt + DO_SAVE_SWITCH_STACK + br.call.sptk.few rp=ia64_handle_unaligned // stack frame setup in ivt .ret14: - movl r28=2f - br.cond.sptk.many load_switch_stack -2: br.cond.sptk.many rp // goes to ia64_leave_kernel - .endp ia64_prepare_handle_unaligned + DO_LOAD_SWITCH_STACK(PT_REGS_UNWIND_INFO) + br.cond.sptk.many rp // goes to ia64_leave_kernel +END(ia64_prepare_handle_unaligned) .rodata .align 8 @@ -1065,7 +1076,7 @@ data8 sys_setdomainname data8 sys_newuname // 1130 data8 sys_adjtimex 
- data8 sys_create_module + data8 ia64_create_module data8 sys_init_module data8 sys_delete_module data8 sys_get_kernel_syms // 1135 @@ -1212,4 +1223,3 @@ data8 ia64_ni_syscall data8 ia64_ni_syscall data8 ia64_ni_syscall - diff -urN linux-2.4.0-test1/arch/ia64/kernel/entry.h linux-2.4.0-test1-lia/arch/ia64/kernel/entry.h --- linux-2.4.0-test1/arch/ia64/kernel/entry.h Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/entry.h Thu Jun 1 01:02:23 2000 @@ -2,7 +2,60 @@ * Preserved registers that are shared between code in ivt.S and entry.S. Be * careful not to step on these! */ -#define pEOI p1 /* should leave_kernel write EOI? */ #define pKern p2 /* will leave_kernel return to kernel-mode? */ #define pSys p4 /* are we processing a (synchronous) system call? */ #define pNonSys p5 /* complement of pSys */ + +#define PT(f) (IA64_PT_REGS_##f##_OFFSET + 16) +#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET + 16) + +#define PT_REGS_UNWIND_INFO \ + UNW(.prologue); \ + UNW(.unwabi @svr4, 'i'); \ + UNW(.fframe IA64_PT_REGS_SIZE+16); \ + UNW(.spillsp rp, PT(CR_IIP)); \ + UNW(.spillsp ar.pfs, PT(CR_IFS)); \ + UNW(.spillsp ar.unat, PT(AR_UNAT)); \ + UNW(.spillsp pr, PT(PR)); \ + UNW(.body) + +#define SAVE_SWITCH_STACK_UNWIND_INFO \ + .savesp ar.unat,SW(CALLER_UNAT); .savesp ar.fpsr,SW(AR_FPSR)); \ + UNW(.spillsp f2,SW(F2)); UNW(.spillsp f3,SW(F3)); \ + UNW(.spillsp f4,SW(F4)); UNW(.spillsp f5,SW(F5)); \ + UNW(.spillsp f16,SW(F16)); UNW(.spillsp f17,SW(F17)); \ + UNW(.spillsp f18,SW(F18)); UNW(.spillsp f19,SW(F19)); \ + UNW(.spillsp f20,SW(F20)); UNW(.spillsp f21,SW(F21)); \ + UNW(.spillsp f22,SW(F22)); UNW(.spillsp f23,SW(F23)); \ + UNW(.spillsp f24,SW(F24)); UNW(.spillsp f25,SW(F25)); \ + UNW(.spillsp f26,SW(F26)); UNW(.spillsp f27,SW(F27)); \ + UNW(.spillsp f28,SW(F28)); UNW(.spillsp f29,SW(F29)); \ + UNW(.spillsp f30,SW(F30)); UNW(.spillsp f31,SW(F31)); \ + UNW(.spillsp r4,SW(R4)); UNW(.spillsp r5,SW(R5)); \ + UNW(.spillsp r6,SW(R6)); UNW(.spillsp r7,SW(R7)); \ + 
UNW(.spillsp b1,SW(B1)); UNW(.spillsp b2,SW(B2)); \ + UNW(.spillsp b3,SW(B3)); UNW(.spillsp b4,SW(B4)); \ + UNW(.spillsp b5,SW(B5)); \ + UNW(.spillsp ar.pfs,SW(AR_PFS)); UNW(.spillsp ar.lc,SW(AR_LC)); \ + UNW(.spillsp @priunat,SW(AR_UNAT)); \ + UNW(.spillsp ar.rnat,SW(AR_RNAT)); UNW(.spillsp ar.bspstore,SW(AR_BSPSTORE)); \ + UNW(.spillsp pr,SW(PR)) + +#define DO_SAVE_SWITCH_STACK \ + movl r28=1f; \ + ;; \ + .fframe IA64_SWITCH_STACK_SIZE; \ + adds sp=-IA64_SWITCH_STACK_SIZE,sp; \ + mov b7=r28; \ + SAVE_SWITCH_STACK_UNWIND_INFO; \ + br.cond.sptk.many save_switch_stack; \ +1: + +#define DO_LOAD_SWITCH_STACK(extra) \ + movl r28=1f; \ + ;; \ + mov b7=r28; \ + br.cond.sptk.many load_switch_stack; \ +1: UNW(.restore sp); \ + extra; \ + adds sp=IA64_SWITCH_STACK_SIZE,sp diff -urN linux-2.4.0-test1/arch/ia64/kernel/fw-emu.c linux-2.4.0-test1-lia/arch/ia64/kernel/fw-emu.c --- linux-2.4.0-test1/arch/ia64/kernel/fw-emu.c Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/fw-emu.c Thu Jun 1 01:02:33 2000 @@ -124,7 +124,18 @@ .proc pal_emulator_static pal_emulator_static: mov r8=-1 - cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ + + mov r9=256 + ;; + cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */ +(p6) br.cond.sptk.few static + ;; + mov r9=512 + ;; + cmp.gtu p6,p7=r9,r28 +(p6) br.cond.sptk.few stacked + ;; +static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */ (p7) br.cond.sptk.few 1f ;; mov r8=0 /* status = 0 */ @@ -157,7 +168,12 @@ ;; mov ar.lc=r9 mov r8=r0 -1: br.cond.sptk.few rp +1: + br.cond.sptk.few rp + +stacked: + br.ret.sptk.few rp + .endp pal_emulator_static\n"); /* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */ diff -urN linux-2.4.0-test1/arch/ia64/kernel/gate.S linux-2.4.0-test1-lia/arch/ia64/kernel/gate.S --- linux-2.4.0-test1/arch/ia64/kernel/gate.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/gate.S Thu May 25 22:57:57 2000 @@ -3,10 +3,11 @@ * each task's text region. 
For now, it contains the signal * trampoline code only. * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 David Mosberger-Tang + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang */ +#include #include #include #include @@ -75,15 +76,12 @@ * [sp+16] = sigframe */ - .global ia64_sigtramp - .proc ia64_sigtramp -ia64_sigtramp: +GLOBAL_ENTRY(ia64_sigtramp) ld8 r10=[r3],8 // get signal handler entry point br.call.sptk.many rp=invoke_sighandler - .endp ia64_sigtramp +END(ia64_sigtramp) - .proc invoke_sighandler -invoke_sighandler: +ENTRY(invoke_sighandler) ld8 gp=[r3] // get signal handler's global pointer mov b6=r10 cover // push args in interrupted frame onto backing store @@ -152,10 +150,9 @@ ldf.fill f15=[base1],32 mov r15=__NR_rt_sigreturn break __BREAK_SYSCALL - .endp invoke_sighandler +END(invoke_sighandler) - .proc setup_rbs -setup_rbs: +ENTRY(setup_rbs) flushrs // must be first in insn mov ar.rsc=r0 // put RSE into enforced lazy mode adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp @@ -167,9 +164,9 @@ mov ar.rsc=0xf // set RSE into eager mode, pl 3 invala // invalidate ALAT br.cond.sptk.many back_from_setup_rbs +END(setup_rbs) - .proc restore_rbs -restore_rbs: +ENTRY(restore_rbs) flushrs mov ar.rsc=r0 // put RSE into enforced lazy mode adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp @@ -181,5 +178,4 @@ mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc) // invala not necessary as that will happen when returning to user-mode br.cond.sptk.many back_from_restore_rbs - - .endp restore_rbs +END(restore_rbs) diff -urN linux-2.4.0-test1/arch/ia64/kernel/head.S linux-2.4.0-test1-lia/arch/ia64/kernel/head.S --- linux-2.4.0-test1/arch/ia64/kernel/head.S Sun Feb 13 10:30:38 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/head.S Thu Jun 1 01:02:47 2000 @@ -16,6 +16,7 @@ #include +#include #include #include #include @@ -54,10 +55,12 @@ stringz "Halting kernel\n" .text - .align 16 - .global _start - .proc _start -_start: + 
+GLOBAL_ENTRY(_start) + UNW(.prologue) + UNW(.save rp, r4) // terminate unwind chain with a NULL rp + UNW(mov r4=r0) + .body // set IVT entry point---can't access I/O ports without it movl r3=ia64_ivt ;; @@ -156,12 +159,9 @@ ld8 out0=[r2] br.call.sptk.few b0=console_print self: br.sptk.few self // endless loop - .endp _start +END(_start) - .align 16 - .global ia64_save_debug_regs - .proc ia64_save_debug_regs -ia64_save_debug_regs: +GLOBAL_ENTRY(ia64_save_debug_regs) alloc r16=ar.pfs,1,0,0,0 mov r20=ar.lc // preserve ar.lc mov ar.lc=IA64_NUM_DBG_REGS-1 @@ -177,13 +177,10 @@ br.cloop.sptk.few 1b ;; mov ar.lc=r20 // restore ar.lc - br.ret.sptk.few b0 - .endp ia64_save_debug_regs + br.ret.sptk.few rp +END(ia64_save_debug_regs) - .align 16 - .global ia64_load_debug_regs - .proc ia64_load_debug_regs -ia64_load_debug_regs: +GLOBAL_ENTRY(ia64_load_debug_regs) alloc r16=ar.pfs,1,0,0,0 lfetch.nta [in0] mov r20=ar.lc // preserve ar.lc @@ -200,13 +197,10 @@ br.cloop.sptk.few 1b ;; mov ar.lc=r20 // restore ar.lc - br.ret.sptk.few b0 - .endp ia64_load_debug_regs + br.ret.sptk.few rp +END(ia64_load_debug_regs) - .align 16 - .global __ia64_save_fpu - .proc __ia64_save_fpu -__ia64_save_fpu: +GLOBAL_ENTRY(__ia64_save_fpu) alloc r2=ar.pfs,1,0,0,0 adds r3=16,in0 ;; @@ -354,12 +348,9 @@ stf.spill.nta [in0]=f126,32 stf.spill.nta [ r3]=f127,32 br.ret.sptk.few rp - .endp __ia64_save_fpu +END(__ia64_save_fpu) - .align 16 - .global __ia64_load_fpu - .proc __ia64_load_fpu -__ia64_load_fpu: +GLOBAL_ENTRY(__ia64_load_fpu) alloc r2=ar.pfs,1,0,0,0 adds r3=16,in0 ;; @@ -507,12 +498,9 @@ ldf.fill.nta f126=[in0],32 ldf.fill.nta f127=[ r3],32 br.ret.sptk.few rp - .endp __ia64_load_fpu +END(__ia64_load_fpu) - .align 16 - .global __ia64_init_fpu - .proc __ia64_init_fpu -__ia64_init_fpu: +GLOBAL_ENTRY(__ia64_init_fpu) alloc r2=ar.pfs,0,0,0,0 stf.spill [sp]=f0 mov f32=f0 @@ -644,4 +632,74 @@ ldf.fill f126=[sp] mov f127=f0 br.ret.sptk.few rp - .endp __ia64_init_fpu +END(__ia64_init_fpu) + +/* + * Switch 
execution mode from virtual to physical or vice versa. + * + * Inputs: + * r16 = new psr to establish + * + * Note: RSE must already be in enforced lazy mode + */ +GLOBAL_ENTRY(ia64_switch_mode) + { + alloc r2=ar.pfs,0,0,0,0 + rsm psr.i | psr.ic // disable interrupts and interrupt collection + mov r15=ip + } + ;; + { + flushrs // must be first insn in group + srlz.i + shr.u r19=r15,61 // r19 <- top 3 bits of current IP + } + ;; + mov cr.ipsr=r16 // set new PSR + add r3=1f-ia64_switch_mode,r15 + xor r15=0x7,r19 // flip the region bits + + mov r17=ar.bsp + mov r14=rp // get return address into a general register + + // switch RSE backing store: + ;; + dep r17=r15,r17,61,3 // make ar.bsp physical or virtual + mov r18=ar.rnat // save ar.rnat + ;; + mov ar.bspstore=r17 // this steps on ar.rnat + dep r3=r15,r3,61,3 // make rfi return address physical or virtual + ;; + mov cr.iip=r3 + mov cr.ifs=r0 + dep sp=r15,sp,61,3 // make stack pointer physical or virtual + ;; + mov ar.rnat=r18 // restore ar.rnat + dep r14=r15,r14,61,3 // make function return address physical or virtual + rfi // must be last insn in group + ;; +1: mov rp=r14 + br.ret.sptk.few rp +END(ia64_switch_mode) + +#ifdef CONFIG_IA64_BRL_EMU + +/* + * Assembly routines used by brl_emu.c to set preserved register state. 
+ */ + +#define SET_REG(reg) \ + GLOBAL_ENTRY(ia64_set_##reg); \ + alloc r16=ar.pfs,1,0,0,0; \ + mov reg=r32; \ + ;; \ + br.ret.sptk rp; \ + END(ia64_set_##reg) + +SET_REG(b1); +SET_REG(b2); +SET_REG(b3); +SET_REG(b4); +SET_REG(b5); + +#endif /* CONFIG_IA64_BRL_EMU */ diff -urN linux-2.4.0-test1/arch/ia64/kernel/ia64_ksyms.c linux-2.4.0-test1-lia/arch/ia64/kernel/ia64_ksyms.c --- linux-2.4.0-test1/arch/ia64/kernel/ia64_ksyms.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/ia64_ksyms.c Fri Jun 9 17:10:26 2000 @@ -0,0 +1,72 @@ +/* + * Architecture-specific kernel symbols + */ + +#include +#include + +#include +EXPORT_SYMBOL_NOVERS(memset); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL_NOVERS(memcpy); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(strcat); +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strncat); +EXPORT_SYMBOL(strncmp); +EXPORT_SYMBOL(strncpy); +EXPORT_SYMBOL(strtok); + +#include +EXPORT_SYMBOL(pci_alloc_consistent); +EXPORT_SYMBOL(pci_free_consistent); + +#include +#include +EXPORT_SYMBOL(csum_partial_copy_nocheck); + +#include +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); + +#include +#include +EXPORT_SYMBOL(irq_stat); + +#include +EXPORT_SYMBOL(cpu_data); +EXPORT_SYMBOL(kernel_thread); + +#ifdef CONFIG_SMP +EXPORT_SYMBOL(synchronize_irq); + +#include +EXPORT_SYMBOL(kernel_flag); + +#include +EXPORT_SYMBOL(__global_sti); +EXPORT_SYMBOL(__global_cli); +EXPORT_SYMBOL(__global_save_flags); +EXPORT_SYMBOL(__global_restore_flags); + +#endif + +#include +EXPORT_SYMBOL(__copy_user); + +#include +EXPORT_SYMBOL(__ia64_syscall); + +/* from arch/ia64/lib */ +extern void __divdi3(void); +extern void __udivdi3(void); +extern void __moddi3(void); +extern void __umoddi3(void); + +EXPORT_SYMBOL_NOVERS(__divdi3); +EXPORT_SYMBOL_NOVERS(__udivdi3); +EXPORT_SYMBOL_NOVERS(__moddi3); +EXPORT_SYMBOL_NOVERS(__umoddi3); diff -urN linux-2.4.0-test1/arch/ia64/kernel/irq.c 
linux-2.4.0-test1-lia/arch/ia64/kernel/irq.c --- linux-2.4.0-test1/arch/ia64/kernel/irq.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/irq.c Tue May 2 12:46:36 2000 @@ -582,7 +582,8 @@ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { action = desc->action; status &= ~IRQ_PENDING; /* we commit to handling */ - status |= IRQ_INPROGRESS; /* we are handling it */ + if (!(status & IRQ_PER_CPU)) + status |= IRQ_INPROGRESS; /* we are handling it */ } desc->status = status; diff -urN linux-2.4.0-test1/arch/ia64/kernel/irq_ia64.c linux-2.4.0-test1-lia/arch/ia64/kernel/irq_ia64.c --- linux-2.4.0-test1/arch/ia64/kernel/irq_ia64.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/irq_ia64.c Wed May 10 16:47:53 2000 @@ -33,7 +33,9 @@ #include #include -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#define IRQ_DEBUG 0 + +#ifdef CONFIG_ITANIUM_A1_SPECIFIC spinlock_t ivr_read_lock; #endif @@ -49,7 +51,7 @@ 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41 }; -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC int usbfix; @@ -63,7 +65,7 @@ __setup("usbfix", usbfix_option); -#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ +#endif /* CONFIG_ITANIUM_A1_SPECIFIC */ /* * That's where the IVT branches when we get an external @@ -73,13 +75,8 @@ void ia64_handle_irq (unsigned long vector, struct pt_regs *regs) { - unsigned long bsp, sp, saved_tpr; - -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC -# ifndef CONFIG_SMP - static unsigned int max_prio = 0; - unsigned int prev_prio; -# endif + unsigned long saved_tpr; +#ifdef CONFIG_ITANIUM_A1_SPECIFIC unsigned long eoi_ptr; # ifdef CONFIG_USB @@ -95,18 +92,14 @@ spin_lock(&ivr_read_lock); { unsigned int tmp; - /* * Disable PCI writes */ outl(0x80ff81c0, 0xcf8); tmp = inl(0xcfc); outl(tmp | 0x400, 0xcfc); - eoi_ptr = inl(0xcfc); - vector = ia64_get_ivr(); - /* * Enable PCI writes */ @@ -118,75 +111,61 @@ if (usbfix) reenable_usb(); # endif +#endif /* CONFIG_ITANIUM_A1_SPECIFIC */ -# ifndef CONFIG_SMP - 
prev_prio = max_prio; - if (vector < max_prio) { - printk ("ia64_handle_irq: got vector %lu while %u was in progress!\n", - vector, max_prio); - - } else - max_prio = vector; -# endif /* !CONFIG_SMP */ -#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ - - /* - * Always set TPR to limit maximum interrupt nesting depth to - * 16 (without this, it would be ~240, which could easily lead - * to kernel stack overflows. - */ - saved_tpr = ia64_get_tpr(); - ia64_srlz_d(); - ia64_set_tpr(vector); - ia64_srlz_d(); +#if IRQ_DEBUG + { + unsigned long bsp, sp; - asm ("mov %0=ar.bsp" : "=r"(bsp)); - asm ("mov %0=sp" : "=r"(sp)); + asm ("mov %0=ar.bsp" : "=r"(bsp)); + asm ("mov %0=sp" : "=r"(sp)); - if ((sp - bsp) < 1024) { - static long last_time; - static unsigned char count; - - if (count > 5 && jiffies - last_time > 5*HZ) - count = 0; - if (++count < 5) { - last_time = jiffies; - printk("ia64_handle_irq: DANGER: less than 1KB of free stack space!!\n" - "(bsp=0x%lx, sp=%lx)\n", bsp, sp); + if ((sp - bsp) < 1024) { + static unsigned char count; + static long last_time; + + if (count > 5 && jiffies - last_time > 5*HZ) + count = 0; + if (++count < 5) { + last_time = jiffies; + printk("ia64_handle_irq: DANGER: less than " + "1KB of free stack space!!\n" + "(bsp=0x%lx, sp=%lx)\n", bsp, sp); + } } } +#endif /* IRQ_DEBUG */ /* - * The interrupt is now said to be in service + * Always set TPR to limit maximum interrupt nesting depth to + * 16 (without this, it would be ~240, which could easily lead + * to kernel stack overflows). 
*/ - if (vector >= NR_IRQS) { - printk("handle_irq: invalid vector %lu\n", vector); - goto out; - } - - do_IRQ(vector, regs); - out: -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC - { - long pEOI; - - asm ("mov %0=0;; (p1) mov %0=1" : "=r"(pEOI)); - if (!pEOI) { - printk("Yikes: ia64_handle_irq() without pEOI!!\n"); - asm volatile ("cmp.eq p1,p0=r0,r0" : "=r"(pEOI)); + saved_tpr = ia64_get_tpr(); + ia64_srlz_d(); + do { + if (vector >= NR_IRQS) { + printk("handle_irq: invalid vector %lu\n", vector); + ia64_set_tpr(saved_tpr); + ia64_srlz_d(); + return; } - } + ia64_set_tpr(vector); + ia64_srlz_d(); - local_irq_disable(); -# ifndef CONFIG_SMP - if (max_prio == vector) - max_prio = prev_prio; -# endif /* !CONFIG_SMP */ -#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ + do_IRQ(vector, regs); - ia64_srlz_d(); - ia64_set_tpr(saved_tpr); - ia64_srlz_d(); + /* + * Disable interrupts and send EOI: + */ + local_irq_disable(); + ia64_set_tpr(saved_tpr); + ia64_eoi(); +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + break; +#endif + vector = ia64_get_ivr(); + } while (vector != IA64_SPURIOUS_INT); } #ifdef CONFIG_SMP @@ -210,12 +189,12 @@ ia64_set_lrr0(0, 1); ia64_set_lrr1(0, 1); - irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic; irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic; #ifdef CONFIG_SMP /* * Configure the IPI vector and handler */ + irq_desc[IPI_IRQ].status |= IRQ_PER_CPU; irq_desc[IPI_IRQ].handler = &irq_type_ia64_sapic; setup_irq(IPI_IRQ, &ipi_irqaction); #endif @@ -234,7 +213,7 @@ { unsigned long ipi_addr; unsigned long ipi_data; -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC unsigned long flags; #endif # define EID 0 @@ -242,13 +221,13 @@ ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_addr = ipi_base_addr | ((cpu << 8 | EID) << 4) | ((redirect & 1) << 3); -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC spin_lock_irqsave(&ivr_read_lock, flags); -#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ +#endif writeq(ipi_data, 
ipi_addr); -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC spin_unlock_irqrestore(&ivr_read_lock, flags); #endif } diff -urN linux-2.4.0-test1/arch/ia64/kernel/ivt.S linux-2.4.0-test1-lia/arch/ia64/kernel/ivt.S --- linux-2.4.0-test1/arch/ia64/kernel/ivt.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/ivt.S Thu Jun 1 01:03:38 2000 @@ -170,9 +170,31 @@ * The ITLB basically does the same as the VHPT handler except * that we always insert exactly one instruction TLB entry. */ +#if 0 + /* + * This code works, but I don't want to enable it until I have numbers + * that prove this to be a win. + */ + mov r31=pr // save predicates + ;; + thash r17=r16 // compute virtual address of L3 PTE + ;; + ld8.s r18=[r17] // try to read L3 PTE + ;; + tnat.nz p6,p0=r18 // did read succeed? +(p6) br.cond.spnt.many 1f + ;; + itc.i r18 + ;; + mov pr=r31,-1 + rfi + +1: rsm psr.dt // use physical addressing for data +#else mov r16=cr.ifa // get address that caused the TLB miss ;; rsm psr.dt // use physical addressing for data +#endif mov r31=pr // save the predicate registers mov r19=ar.k7 // get page table base address shl r21=r16,3 // shift bit 60 into sign bit @@ -222,9 +244,31 @@ * that we always insert exactly one data TLB entry. */ mov r16=cr.ifa // get address that caused the TLB miss +#if 0 + /* + * This code works, but I don't want to enable it until I have numbers + * that prove this to be a win. + */ + mov r31=pr // save predicates + ;; + thash r17=r16 // compute virtual address of L3 PTE + ;; + ld8.s r18=[r17] // try to read L3 PTE + ;; + tnat.nz p6,p0=r18 // did read succeed? 
+(p6) br.cond.spnt.many 1f + ;; + itc.d r18 ;; + mov pr=r31,-1 + rfi + +1: rsm psr.dt // use physical addressing for data +#else rsm psr.dt // use physical addressing for data mov r31=pr // save the predicate registers + ;; +#endif mov r19=ar.k7 // get page table base address shl r21=r16,3 // shift bit 60 into sign bit shr.u r17=r16,61 // get the region number into r17 @@ -265,37 +309,6 @@ mov pr=r31,-1 // restore predicate registers rfi - //----------------------------------------------------------------------------------- - // call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address) -page_fault: - SAVE_MIN_WITH_COVER - // - // Copy control registers to temporary registers, then turn on psr bits, - // then copy the temporary regs to the output regs. We have to do this - // because the "alloc" can cause a mandatory store which could lead to - // an "Alt DTLB" fault which we can handle only if psr.ic is on. - // - mov r8=cr.ifa - mov r9=cr.isr - adds r3=8,r2 // set up second base pointer - ;; - ssm psr.ic | psr.dt - ;; - srlz.i // guarantee that interrupt collection is enabled - ;; -(p15) ssm psr.i // restore psr.i - movl r14=ia64_leave_kernel - ;; - alloc r15=ar.pfs,0,0,3,0 // must be first in insn group - mov out0=r8 - mov out1=r9 - ;; - SAVE_REST - mov rp=r14 - ;; - adds out2=16,r12 // out2 = pointer to pt_regs - br.call.sptk.few b6=ia64_do_page_fault // ignore return address - .align 1024 ///////////////////////////////////////////////////////////////////////////////////////// // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) @@ -303,7 +316,7 @@ movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX ;; shr.u r18=r16,57 // move address bit 61 to bit 4 - dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. 
phys bits + dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits ;; andcm r18=0x10,r18 // bit 4=~address-bit(61) dep r16=r17,r16,0,12 // insert PTE control bits into r16 @@ -318,18 +331,57 @@ // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) mov r16=cr.ifa // get address that caused the TLB miss movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW + mov r20=cr.isr + mov r21=cr.ipsr + mov r19=pr ;; + tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? shr.u r18=r16,57 // move address bit 61 to bit 4 - dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits + dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits ;; andcm r18=0x10,r18 // bit 4=~address-bit(61) dep r16=r17,r16,0,12 // insert PTE control bits into r16 ;; or r16=r16,r18 // set bit 4 (uncached) if the access was to region 6 +(p6) mov cr.ipsr=r21 ;; - itc.d r16 // insert the TLB entry +(p7) itc.d r16 // insert the TLB entry + mov pr=r19,-1 rfi + ;; + + //----------------------------------------------------------------------------------- + // call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address) +page_fault: + SAVE_MIN_WITH_COVER + // + // Copy control registers to temporary registers, then turn on psr bits, + // then copy the temporary regs to the output regs. We have to do this + // because the "alloc" can cause a mandatory store which could lead to + // an "Alt DTLB" fault which we can handle only if psr.ic is on. 
+ // + mov r8=cr.ifa + mov r9=cr.isr + adds r3=8,r2 // set up second base pointer + ;; + ssm psr.ic | psr.dt + ;; + srlz.i // guarantee that interrupt collection is enabled + ;; +(p15) ssm psr.i // restore psr.i + movl r14=ia64_leave_kernel + ;; + alloc r15=ar.pfs,0,0,3,0 // must be first in insn group + mov out0=r8 + mov out1=r9 + ;; + SAVE_REST + mov rp=r14 + ;; + adds out2=16,r12 // out2 = pointer to pt_regs + br.call.sptk.few b6=ia64_do_page_fault // ignore return address + .align 1024 ///////////////////////////////////////////////////////////////////////////////////////// // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) @@ -338,7 +390,7 @@ // Access-bit, or Data Access-bit faults cause a nested fault because the // dTLB entry for the virtual page table isn't present. In such a case, // we lookup the pte for the faulting address by walking the page table - // and return to the contination point passed in register r30. + // and return to the continuation point passed in register r30. // In accessing the page tables, we don't need to check for NULL entries // because if the page tables didn't map the faulting address, it would not // be possible to receive one of the above faults. @@ -441,9 +493,6 @@ tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? ;; (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa -#if 0 - ;; -#endif mov pr=r31,-1 #endif /* CONFIG_ITANIUM */ movl r30=1f // load continuation point in case of nested fault @@ -489,7 +538,6 @@ ;; srlz.d // ensure everyone knows psr.dt is off... cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so) - #if 1 // Allow syscalls via the old system call number for the time being. This is // so we can transition to the new syscall number in a relatively smooth @@ -498,7 +546,6 @@ ;; (p7) cmp.eq.or.andcm p0,p7=r16,r17 // is this the old syscall number? 
#endif - (p7) br.cond.spnt.many non_syscall SAVE_MIN // uses r31; defines r2: @@ -575,13 +622,12 @@ ssm psr.ic | psr.dt // turn interrupt collection and data translation back on ;; adds r3=8,r2 // set up second base pointer for SAVE_REST - cmp.eq pEOI,p0=r0,r0 // set pEOI flag so that ia64_leave_kernel writes cr.eoi srlz.i // ensure everybody knows psr.ic and psr.dt are back on ;; SAVE_REST ;; alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC mov out0=r0 // defer reading of cr.ivr to handle_irq... #else mov out0=cr.ivr // pass cr.ivr as first arg @@ -609,6 +655,50 @@ // 0x3c00 Entry 15 (size 64 bundles) Reserved FAULT(15) +// +// Squatting in this space ... +// +// This special case dispatcher for illegal operation faults +// allows preserved registers to be modified through a +// callback function (asm only) that is handed back from +// the fault handler in r8. Up to three arguments can be +// passed to the callback function by returning an aggregate +// with the callback as its first element, followed by the +// arguments. +// +dispatch_illegal_op_fault: + SAVE_MIN_WITH_COVER + // + // The "alloc" can cause a mandatory store which could lead to + // an "Alt DTLB" fault which we can handle only if psr.ic is on. 
+ // + ssm psr.ic | psr.dt + ;; + srlz.i // guarantee that interrupt collection is enabled + ;; +(p15) ssm psr.i // restore psr.i + adds r3=8,r2 // set up second base pointer for SAVE_REST + ;; + alloc r14=ar.pfs,0,0,1,0 // must be first in insn group + mov out0=ar.ec + ;; + SAVE_REST + ;; + br.call.sptk.few rp=ia64_illegal_op_fault + ;; + alloc r14=ar.pfs,0,0,3,0 // must be first in insn group + mov out0=r9 + mov out1=r10 + mov out2=r11 + movl r15=ia64_leave_kernel + ;; + mov rp=r15 + mov b6=r8 + ;; + cmp.ne p6,p0=0,r8 +(p6) br.call.dpnt b6=b6 // call returns to ia64_leave_kernel + br.sptk ia64_leave_kernel + .align 1024 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4000 Entry 16 (size 64 bundles) Reserved @@ -643,14 +733,17 @@ (p6) br.call.dpnt.few b6=non_ia32_syscall adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions - + adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp + ;; + cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 + st8 [r15]=r8 // save orignal EAX in r1 (IA32 procs don't use the GP) ;; alloc r15=ar.pfs,0,0,6,0 // must first in an insn group ;; ld4 r8=[r14],8 // r8 == EAX (syscall number) - mov r15=0xff + mov r15=190 // sys_vfork - last implemented system call ;; - cmp.ltu.unc p6,p7=r8,r15 + cmp.leu.unc p6,p7=r8,r15 ld4 out1=[r14],8 // r9 == ecx ;; ld4 out2=[r14],8 // r10 == edx @@ -868,7 +961,16 @@ .align 256 ///////////////////////////////////////////////////////////////////////////////////////// // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) - FAULT(24) + mov r16=cr.isr + mov r31=pr + rsm psr.dt // avoid nested faults due to TLB misses... + ;; + srlz.d // ensure everyone knows psr.dt is off... 
+ cmp4.eq p6,p0=0,r16 +(p6) br.sptk dispatch_illegal_op_fault + ;; + mov r19=24 // fault number + br.cond.sptk.many dispatch_to_fault_handler .align 256 ///////////////////////////////////////////////////////////////////////////////////////// @@ -939,7 +1041,6 @@ mov r31=pr // prepare to save predicates ;; srlz.d // ensure everyone knows psr.dt is off - mov r19=30 // error vector for fault_handler (when kernel) br.cond.sptk.many dispatch_unaligned_handler .align 256 diff -urN linux-2.4.0-test1/arch/ia64/kernel/mca.c linux-2.4.0-test1-lia/arch/ia64/kernel/mca.c --- linux-2.4.0-test1/arch/ia64/kernel/mca.c Fri Apr 21 16:37:25 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/mca.c Tue May 2 12:46:36 2000 @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff -urN linux-2.4.0-test1/arch/ia64/kernel/mca_asm.S linux-2.4.0-test1-lia/arch/ia64/kernel/mca_asm.S --- linux-2.4.0-test1/arch/ia64/kernel/mca_asm.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/mca_asm.S Fri Jun 9 17:23:02 2000 @@ -6,7 +6,6 @@ // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack, // switch modes, jump to C INIT handler // -#include #include #include #include diff -urN linux-2.4.0-test1/arch/ia64/kernel/minstate.h linux-2.4.0-test1-lia/arch/ia64/kernel/minstate.h --- linux-2.4.0-test1/arch/ia64/kernel/minstate.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/minstate.h Thu Jun 1 01:03:59 2000 @@ -101,7 +101,6 @@ ;; \ st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \ st8.spill [r17]=rR1,16; /* save original r1 */ \ - cmp.ne pEOI,p0=r0,r0 /* clear pEOI by default */ \ ;; \ .mem.offset 0,0; st8.spill [r16]=r2,16; \ .mem.offset 8,0; st8.spill [r17]=r3,16; \ diff -urN linux-2.4.0-test1/arch/ia64/kernel/pal.S linux-2.4.0-test1-lia/arch/ia64/kernel/pal.S --- linux-2.4.0-test1/arch/ia64/kernel/pal.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/pal.S Thu Jun 1 01:04:13 
2000 @@ -4,9 +4,16 @@ * * Copyright (C) 1999 Don Dugger * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999 David Mosberger + * Copyright (C) 1999-2000 David Mosberger + * Copyright (C) 2000 Stephane Eranian + * + * 05/22/2000 eranian Added support for stacked register calls + * 05/24/2000 eranian Added support for physical mode static calls */ +#include +#include + .text .psr abi64 .psr lsb @@ -24,29 +31,23 @@ * * in0 Address of the PAL entry point (text address, NOT a function descriptor). */ - .align 16 - .global ia64_pal_handler_init - .proc ia64_pal_handler_init -ia64_pal_handler_init: +GLOBAL_ENTRY(ia64_pal_handler_init) alloc r3=ar.pfs,1,0,0,0 movl r2=pal_entry_point ;; st8 [r2]=in0 br.ret.sptk.few rp - - .endp ia64_pal_handler_init +END(ia64_pal_handler_init) /* * Default PAL call handler. This needs to be coded in assembly because it uses * the static calling convention, i.e., the RSE may not be used and calls are * done via "br.cond" (not "br.call"). */ - .align 16 - .global ia64_pal_default_handler - .proc ia64_pal_default_handler -ia64_pal_default_handler: +GLOBAL_ENTRY(ia64_pal_default_handler) mov r8=-1 br.cond.sptk.few rp +END(ia64_pal_default_handler) /* * Make a PAL call using the static calling convention. 
@@ -56,44 +57,23 @@ * in2 - in4 Remaning PAL arguments * */ - -#ifdef __GCC_MULTIREG_RETVALS__ -# define arg0 in0 -# define arg1 in1 -# define arg2 in2 -# define arg3 in3 -# define arg4 in4 -#else -# define arg0 in1 -# define arg1 in2 -# define arg2 in3 -# define arg3 in4 -# define arg4 in5 -#endif - - .text - .psr abi64 - .psr lsb - .lsb - - .align 16 - .global ia64_pal_call_static - .proc ia64_pal_call_static -ia64_pal_call_static: - alloc loc0 = ar.pfs,6,90,0,0 +GLOBAL_ENTRY(ia64_pal_call_static) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)) + alloc loc1 = ar.pfs,6,90,0,0 movl loc2 = pal_entry_point 1: { - mov r28 = arg0 - mov r29 = arg1 + mov r28 = in0 + mov r29 = in1 mov r8 = ip } ;; ld8 loc2 = [loc2] // loc2 <- entry point - mov r30 = arg2 - mov r31 = arg3 + mov r30 = in2 + mov r31 = in3 ;; mov loc3 = psr - mov loc1 = rp + mov loc0 = rp + .body adds r8 = .ret0-1b,r8 ;; rsm psr.i @@ -102,18 +82,114 @@ ;; br.cond.sptk.few b7 .ret0: mov psr.l = loc3 -#ifndef __GCC_MULTIREG_RETVALS__ - st8 [in0] = r8, 8 + mov ar.pfs = loc1 + mov rp = loc0 ;; - st8 [in0] = r9, 8 + srlz.d // seralize restoration of psr.l + br.ret.sptk.few b0 +END(ia64_pal_call_static) + +/* + * Make a PAL call using the stacked registers calling convention. 
+ * + * Inputs: + * in0 Index of PAL service + * in2 - in3 Remaning PAL arguments + */ +GLOBAL_ENTRY(ia64_pal_call_stacked) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)) + alloc loc1 = ar.pfs,5,4,87,0 + movl loc2 = pal_entry_point + + mov r28 = in0 // Index MUST be copied to r28 + mov out0 = in0 // AND in0 of PAL function + mov loc0 = rp + .body ;; - st8 [in0] = r10, 8 + ld8 loc2 = [loc2] // loc2 <- entry point + mov out1 = in1 + mov out2 = in2 + mov out3 = in3 + mov loc3 = psr ;; - st8 [in0] = r11, 8 -#endif - mov ar.pfs = loc0 - mov rp = loc1 + rsm psr.i + mov b7 = loc2 + ;; + br.call.sptk.many rp=b7 // now make the call +.ret2: + mov psr.l = loc3 + mov ar.pfs = loc1 + mov rp = loc0 + ;; + srlz.d // serialize restoration of psr.l + br.ret.sptk.few b0 +END(ia64_pal_call_stacked) + +/* + * Make a physical mode PAL call using the static registers calling convention. + * + * Inputs: + * in0 Index of PAL service + * in2 - in3 Remaning PAL arguments + * + * PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel. + * So we don't need to clear them. 
+ */ +#define PAL_PSR_BITS_TO_CLEAR \ + (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \ + IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ + IA64_PSR_DFL | IA64_PSR_DFH) + +#define PAL_PSR_BITS_TO_SET \ + (IA64_PSR_BN) + + +GLOBAL_ENTRY(ia64_pal_call_phys_static) + UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)) + alloc loc1 = ar.pfs,6,90,0,0 + movl loc2 = pal_entry_point +1: { + mov r28 = in0 // copy procedure index + mov r8 = ip // save ip to compute branch + mov loc0 = rp // save rp + } + .body + ;; + ld8 loc2 = [loc2] // loc2 <- entry point + mov r29 = in1 // first argument + mov r30 = in2 // copy arg2 + mov r31 = in3 // copy arg3 + ;; + mov loc3 = psr // save psr + adds r8 = .ret4-1b,r8 // calculate return address for call + ;; + mov loc4=ar.rsc // save RSE configuration + dep.z loc2=loc2,0,61 // convert pal entry point to physical + dep.z r8=r8,0,61 // convert rp to physical + ;; + mov b7 = loc2 // install target to branch reg + mov ar.rsc=r0 // put RSE in enforced lazy, LE mode + movl r16=PAL_PSR_BITS_TO_CLEAR + movl r17=PAL_PSR_BITS_TO_SET + ;; + or loc3=loc3,r17 // add in psr the bits to set + ;; + andcm r16=loc3,r16 // removes bits to clear from psr + br.call.sptk.few rp=ia64_switch_mode +.ret3: + mov rp = r8 // install return address (physical) + br.cond.sptk.few b7 +.ret4: + mov ar.rsc=r0 // put RSE in enforced lazy, LE mode + mov r16=loc3 // r16= original psr + br.call.sptk.few rp=ia64_switch_mode // return to virtual mode + +.ret5: mov psr.l = loc3 // restore init PSR + + mov ar.pfs = loc1 + mov rp = loc0 ;; + mov ar.rsc=loc4 // restore RSE configuration srlz.d // seralize restoration of psr.l br.ret.sptk.few b0 - .endp ia64_pal_call_static +END(ia64_pal_call_phys_static) diff -urN linux-2.4.0-test1/arch/ia64/kernel/palinfo.c linux-2.4.0-test1-lia/arch/ia64/kernel/palinfo.c --- linux-2.4.0-test1/arch/ia64/kernel/palinfo.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/palinfo.c Thu Jun 1 
01:04:23 2000 @@ -0,0 +1,780 @@ +/* + * palinfo.c + * + * Prints processor specific information reported by PAL. + * This code is based on specification of PAL as of the + * Intel IA-64 Architecture Software Developer's Manual v1.0. + * + * + * Copyright (C) 2000 Hewlett-Packard Co + * Copyright (C) 2000 Stephane Eranian + * + * 05/26/2000 S.Eranian initial release + * + * ISSUES: + * - because of some PAL bugs, some calls return invalid results or + * are empty for now. + * - remove hack to avoid problem with <= 256M RAM for itr. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * Hope to get rid of these in a near future +*/ +#define IA64_PAL_VERSION_BUG 1 + +#define PALINFO_VERSION "0.1" + +typedef int (*palinfo_func_t)(char*); + +typedef struct { + const char *name; /* name of the proc entry */ + palinfo_func_t proc_read; /* function to call for reading */ + struct proc_dir_entry *entry; /* registered entry (removal) */ +} palinfo_entry_t; + +static struct proc_dir_entry *palinfo_dir; + +/* + * A bunch of string array to get pretty printing + */ + +static char *cache_types[] = { + "", /* not used */ + "Instruction", + "Data", + "Data/Instruction" /* unified */ +}; + +static const char *cache_mattrib[]={ + "WriteThrough", + "WriteBack", + "", /* reserved */ + "" /* reserved */ +}; + +static const char *cache_st_hints[]={ + "Temporal, level 1", + "Reserved", + "Reserved", + "Non-temporal, all levels", + "Reserved", + "Reserved", + "Reserved", + "Reserved" +}; + +static const char *cache_ld_hints[]={ + "Temporal, level 1", + "Non-temporal, level 1", + "Reserved", + "Non-temporal, all levels", + "Reserved", + "Reserved", + "Reserved", + "Reserved" +}; + +static const char *rse_hints[]={ + "enforced lazy", + "eager stores", + "eager loads", + "eager loads and stores" +}; + +#define RSE_HINTS_COUNT (sizeof(rse_hints)/sizeof(const char *)) + +/* + * The current resvision of the Volume 2 of + * IA-64 
Architecture Software Developer's Manual is wrong. + * Table 4-10 has invalid information concerning the ma field: + * Correct table is: + * bit 0 - 001 - UC + * bit 4 - 100 - UC + * bit 5 - 101 - UCE + * bit 6 - 110 - WC + * bit 7 - 111 - NatPage + */ +static const char *mem_attrib[]={ + "Write Back (WB)", /* 000 */ + "Uncacheable (UC)", /* 001 */ + "Reserved", /* 010 */ + "Reserved", /* 011 */ + "Uncacheable (UC)", /* 100 */ + "Uncacheable Exported (UCE)", /* 101 */ + "Write Coalescing (WC)", /* 110 */ + "NaTPage" /* 111 */ +}; + + + +/* + * Allocate a buffer suitable for calling PAL code in Virtual mode + * + * The documentation (PAL2.6) requires thius buffer to have a pinned + * translation to avoid any DTLB faults. For this reason we allocate + * a page (large enough to hold any possible reply) and use a DTC + * to hold the translation during the call. A call the free_palbuffer() + * is required to release ALL resources (page + translation). + * + * The size of the page allocated is based on the PAGE_SIZE defined + * at compile time for the kernel, i.e. >= 4Kb. + * + * Return: a pointer to the newly allocated page (virtual address) + */ +static void * +get_palcall_buffer(void) +{ + void *tmp; + + tmp = (void *)__get_free_page(GFP_KERNEL); + if (tmp == 0) { + printk(KERN_ERR "%s: can't get a buffer page\n", __FUNCTION__); + } else if ( ((u64)tmp - PAGE_OFFSET) > (1<<_PAGE_SIZE_256M) ) { /* XXX: temporary hack */ + unsigned long flags; + + /* PSR.ic must be zero to insert new DTR */ + ia64_clear_ic(flags); + + /* + * we only insert of DTR + * + * XXX: we need to figure out a way to "allocate" TR(s) to avoid + * conflicts. 
Maybe something in an include file like pgtable.h + * page.h or processor.h + * + * ITR0/DTR0: used for kernel code/data + * ITR1/DTR1: used by HP simulator + * ITR2/DTR2: used to map PAL code + */ + ia64_itr(0x2, 3, (u64)tmp, + pte_val(mk_pte_phys(__pa(tmp), __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), PAGE_SHIFT); + + ia64_srlz_d (); + + __restore_flags(flags); + } + + return tmp; +} + +/* + * Free a palcall buffer allocated with the previous call + * + * The translation is also purged. + */ +static void +free_palcall_buffer(void *addr) +{ + __free_page(addr); + ia64_ptr(0x2, (u64)addr, PAGE_SHIFT); + ia64_srlz_d (); +} + +/* + * Take a 64bit vector and produces a string such that + * if bit n is set then 2^n in clear text is generated. The adjustment + * to the right unit is also done. + * + * Input: + * - a pointer to a buffer to hold the string + * - a 64-bit vector + * Ouput: + * - a pointer to the end of the buffer + * + */ +static char * +bitvector_process(char *p, u64 vector) +{ + int i,j; + const char *units[]={ "", "K", "M", "G", "T" }; + + for (i=0, j=0; i < 64; i++ , j=i/10) { + if (vector & 0x1) { + p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]); + } + vector >>= 1; + } + return p; +} + +/* + * Take a 64bit vector and produces a string such that + * if bit n is set then register n is present. The function + * takes into account consecutive registers and prints out ranges. 
+ * + * Input: + * - a pointer to a buffer to hold the string + * - a 64-bit vector + * Ouput: + * - a pointer to the end of the buffer + * + */ +static char * +bitregister_process(char *p, u64 *reg_info, int max) +{ + int i, begin, skip = 0; + u64 value = reg_info[0]; + + value >>= i = begin = ffs(value) - 1; + + for(; i < max; i++ ) { + + if (i != 0 && (i%64) == 0) value = *++reg_info; + + if ((value & 0x1) == 0 && skip == 0) { + if (begin <= i - 2) + p += sprintf(p, "%d-%d ", begin, i-1); + else + p += sprintf(p, "%d ", i-1); + skip = 1; + begin = -1; + } else if ((value & 0x1) && skip == 1) { + skip = 0; + begin = i; + } + value >>=1; + } + if (begin > -1) { + if (begin < 127) + p += sprintf(p, "%d-127", begin); + else + p += sprintf(p, "127"); + } + + return p; +} + +static int +power_info(char *page) +{ + s64 status; + char *p = page; + pal_power_mgmt_info_u_t *halt_info; + int i; + + halt_info = get_palcall_buffer(); + if (halt_info == 0) return 0; + + status = ia64_pal_halt_info(halt_info); + if (status != 0) { + free_palcall_buffer(halt_info); + return 0; + } + + for (i=0; i < 8 ; i++ ) { + if (halt_info[i].pal_power_mgmt_info_s.im == 1) { + p += sprintf(p, "Power level %d:\n" \ + "\tentry_latency : %d cycles\n" \ + "\texit_latency : %d cycles\n" \ + "\tpower consumption : %d mW\n" \ + "\tCache+TLB coherency : %s\n", i, + halt_info[i].pal_power_mgmt_info_s.entry_latency, + halt_info[i].pal_power_mgmt_info_s.exit_latency, + halt_info[i].pal_power_mgmt_info_s.power_consumption, + halt_info[i].pal_power_mgmt_info_s.co ? 
"Yes" : "No"); + } else { + p += sprintf(p,"Power level %d: not implemented\n",i); + } + } + + free_palcall_buffer(halt_info); + + return p - page; +} + +static int +cache_info(char *page) +{ + char *p = page; + u64 levels, unique_caches; + pal_cache_config_info_t cci; + int i,j, k; + s64 status; + + if ((status=ia64_pal_cache_summary(&levels, &unique_caches)) != 0) { + printk("ia64_pal_cache_summary=%ld\n", status); + return 0; + } + + p += sprintf(p, "Cache levels : %ld\n" \ + "Unique caches : %ld\n\n", + levels, + unique_caches); + + for (i=0; i < levels; i++) { + + for (j=2; j >0 ; j--) { + + /* even without unification some level may not be present */ + if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) { + continue; + } + p += sprintf(p, "%s Cache level %d:\n" \ + "\tSize : %ld bytes\n" \ + "\tAttributes : ", + cache_types[j+cci.pcci_unified], i+1, + cci.pcci_cache_size); + + if (cci.pcci_unified) p += sprintf(p, "Unified "); + + p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]); + + p += sprintf(p, "\tAssociativity : %d\n" \ + "\tLine size : %d bytes\n" \ + "\tStride : %d bytes\n", + cci.pcci_assoc, + 1<>=1; + } + p += sprintf(p, "\n\tLoad hints : "); + + for(k=0; k < 8; k++ ) { + if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]); + cci.pcci_ld_hints >>=1; + } + p += sprintf(p, "\n\tAlias boundary : %d byte(s)\n" \ + "\tTag LSB : %d\n" \ + "\tTag MSB : %d\n", + 1<0 ; j--) { + tc_pages = 0; /* just in case */ + + + /* even without unification, some levels may not be present */ + if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { + continue; + } + + p += sprintf(p, "\n%s Translation Cache Level %d:\n" \ + "\tHash sets : %d\n" \ + "\tAssociativity : %d\n" \ + "\tNumber of entries : %d\n" \ + "\tFlags : ", + cache_types[j+tc_info.tc_unified], i+1, + tc_info.tc_num_sets, + tc_info.tc_associativity, + tc_info.tc_num_entries); + + if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); + if 
(tc_info.tc_unified) p += sprintf(p, "Unified "); + if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction"); + + p += sprintf(p, "\n\tSupported page sizes: "); + + p = bitvector_process(p, tc_pages); + + /* when unified date (j=2) is enough */ + if (tc_info.tc_unified) break; + } + } + p += sprintf(p, "\n"); + + return p - page; +} + + +static int +register_info(char *page) +{ + char *p = page; + u64 reg_info[2]; + u64 info; + u64 phys_stacked; + pal_hints_u_t hints; + u64 iregs, dregs; + char *info_type[]={ + "Implemented AR(s)", + "AR(s) with read side-effects", + "Implemented CR(s)", + "CR(s) with read side-effects", + }; + + for(info=0; info < 4; info++) { + + if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) return 0; + + p += sprintf(p, "%-32s : ", info_type[info]); + + p = bitregister_process(p, reg_info, 128); + + p += sprintf(p, "\n"); + } + + if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; + + p += sprintf(p, "RSE stacked physical registers : %ld\n" \ + "RSE load/store hints : %ld (%s)\n", + phys_stacked, + hints.ph_data, + hints.ph_data < RSE_HINTS_COUNT ? 
rse_hints[hints.ph_data]: "(??)"); + + if (ia64_pal_debug_info(&iregs, &dregs)) return 0; + + p += sprintf(p, "Instruction debug register pairs : %ld\n" \ + "Data debug register pairs : %ld\n", + iregs, dregs); + + return p - page; +} + +static const char *proc_features[]={ + NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, + NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, + NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, + NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, + NULL,NULL,NULL,NULL,NULL, + "XIP,XPSR,XFS implemented", + "XR1-XR3 implemented", + "Disable dynamic predicate prediction", + "Disable processor physical number", + "Disable dynamic data cache prefetch", + "Disable dynamic inst cache prefetch", + "Disable dynamic branch prediction", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "Disable BINIT on processor time-out", + "Disable dynamic power management (DPM)", + "Disable coherency", + "Disable cache", + "Enable CMCI promotion", + "Enable MCA to BINIT promotion", + "Enable MCA promotion", + "Enable BEER promotion" +}; + + +static int +processor_info(char *page) +{ + char *p = page; + const char **v = proc_features; + u64 avail=1, status=1, control=1; + int i; + s64 ret; + + /* must be in physical mode */ + if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0; + + for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) { + if ( ! *v ) continue; + p += sprintf(p, "%-40s : %s%s %s\n", *v, + avail & 0x1 ? "" : "NotImpl", + avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "", + avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): ""); + } + return p - page; +} + +/* + * physical mode call for PAL_VERSION is working fine. + * This function is meant to go away once PAL get fixed. 
+ */ +static inline s64 +ia64_pal_version_phys(pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0); + if (pal_min_version) + pal_min_version->pal_version_val = iprv.v0; + if (pal_cur_version) + pal_cur_version->pal_version_val = iprv.v1; + return iprv.status; +} + +static int +version_info(char *page) +{ + s64 status; + pal_version_u_t min_ver, cur_ver; + char *p = page; + +#ifdef IA64_PAL_VERSION_BUG + /* The virtual mode call is buggy. But the physical mode call seems + * to be ok. Until they fix virtual mode, we do physical. + */ + status = ia64_pal_version_phys(&min_ver, &cur_ver); +#else + /* The system crashes if you enable this code with the wrong PAL + * code + */ + status = ia64_pal_version(&min_ver, &cur_ver); +#endif + if (status != 0) return 0; + + p += sprintf(p, "PAL_vendor : 0x%x (min=0x%x)\n" \ + "PAL_A revision : 0x%x (min=0x%x)\n" \ + "PAL_A model : 0x%x (min=0x%x)\n" \ + "PAL_B mode : 0x%x (min=0x%x)\n" \ + "PAL_B revision : 0x%x (min=0x%x)\n", + cur_ver.pal_version_s.pv_pal_vendor, + min_ver.pal_version_s.pv_pal_vendor, + cur_ver.pal_version_s.pv_pal_a_rev, + cur_ver.pal_version_s.pv_pal_a_rev, + cur_ver.pal_version_s.pv_pal_a_model, + min_ver.pal_version_s.pv_pal_a_model, + cur_ver.pal_version_s.pv_pal_b_rev, + min_ver.pal_version_s.pv_pal_b_rev, + cur_ver.pal_version_s.pv_pal_b_model, + min_ver.pal_version_s.pv_pal_b_model); + + return p - page; +} + +static int +perfmon_info(char *page) +{ + char *p = page; + u64 *pm_buffer; + pal_perf_mon_info_u_t pm_info; + + pm_buffer = (u64 *)get_palcall_buffer(); + if (pm_buffer == 0) return 0; + + if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) { + free_palcall_buffer(pm_buffer); + return 0; + } + +#ifdef IA64_PAL_PERF_MON_INFO_BUG + pm_buffer[5]=0x3; + pm_info.pal_perf_mon_info_s.cycles = 0x12; + pm_info.pal_perf_mon_info_s.retired = 0x08; +#endif + + p += sprintf(p, "PMC/PMD pairs : %d\n" \ + "Counter 
width : %d bits\n" \ + "Cycle event number : %d\n" \ + "Retired event number : %d\n" \ + "Implemented PMC : ", + pm_info.pal_perf_mon_info_s.generic, + pm_info.pal_perf_mon_info_s.width, + pm_info.pal_perf_mon_info_s.cycles, + pm_info.pal_perf_mon_info_s.retired); + + p = bitregister_process(p, pm_buffer, 256); + + p += sprintf(p, "\nImplemented PMD : "); + + p = bitregister_process(p, pm_buffer+4, 256); + + p += sprintf(p, "\nCycles count capable : "); + + p = bitregister_process(p, pm_buffer+8, 256); + + p += sprintf(p, "\nRetired bundles count capable : "); + + p = bitregister_process(p, pm_buffer+12, 256); + + p += sprintf(p, "\n"); + + free_palcall_buffer(pm_buffer); + + return p - page; +} + +static int +frequency_info(char *page) +{ + char *p = page; + struct pal_freq_ratio proc, itc, bus; + u64 base; + + if (ia64_pal_freq_base(&base) == -1) + p += sprintf(p, "Output clock : not implemented\n"); + else + p += sprintf(p, "Output clock : %ld ticks/s\n", base); + + if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; + + p += sprintf(p, "Processor/Clock ratio : %ld/%ld\n" \ + "Bus/Clock ratio : %ld/%ld\n" \ + "ITC/Clock ratio : %ld/%ld\n", + proc.num, proc.den, + bus.num, bus.den, + itc.num, itc.den); + + return p - page; +} + + +/* + * Entry point routine: all calls go trhough this function + */ +static int +palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + palinfo_func_t info = (palinfo_func_t)data; + int len = info(page); + + if (len <= off+count) *eof = 1; + + *start = page + off; + len -= off; + + if (len>count) len = count; + if (len<0) len = 0; + + return len; +} + +/* + * List names,function pairs for every entry in /proc/palinfo + * Must be terminated with the NULL,NULL entry. 
+ */ +static palinfo_entry_t palinfo_entries[]={ + { "version_info", version_info, }, + { "vm_info", vm_info, }, + { "cache_info", cache_info, }, + { "power_info", power_info, }, + { "register_info", register_info, }, + { "processor_info", processor_info, }, + { "perfmon_info", perfmon_info, }, + { "frequency_info", frequency_info, }, + { NULL, NULL,} +}; + + +static int __init +palinfo_init(void) +{ + palinfo_entry_t *p; + + printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); + + palinfo_dir = create_proc_entry("palinfo", S_IFDIR | S_IRUGO | S_IXUGO, NULL); + + for (p = palinfo_entries; p->name ; p++){ + p->entry = create_proc_read_entry (p->name, 0, palinfo_dir, + palinfo_read_entry, p->proc_read); + } + + return 0; +} + +static int __exit +palinfo_exit(void) +{ + palinfo_entry_t *p; + + for (p = palinfo_entries; p->name ; p++){ + remove_proc_entry (p->name, palinfo_dir); + } + remove_proc_entry ("palinfo", 0); + + return 0; +} + +module_init(palinfo_init); +module_exit(palinfo_exit); diff -urN linux-2.4.0-test1/arch/ia64/kernel/pci.c linux-2.4.0-test1-lia/arch/ia64/kernel/pci.c --- linux-2.4.0-test1/arch/ia64/kernel/pci.c Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/pci.c Fri Jun 9 17:23:13 2000 @@ -133,7 +133,7 @@ * Initialization. Uses the SAL interface */ -#define PCI_BUSSES_TO_SCAN 2 /* On "real" ;) hardware this will be 255 */ +#define PCI_BUSSES_TO_SCAN 255 void __init pcibios_init(void) @@ -197,7 +197,7 @@ ranges->mem_end -= bus->resource[1]->start; } -int __init +int pcibios_enable_device (struct pci_dev *dev) { /* Not needed, since we enable all devices at startup. 
*/ diff -urN linux-2.4.0-test1/arch/ia64/kernel/process.c linux-2.4.0-test1-lia/arch/ia64/kernel/process.c --- linux-2.4.0-test1/arch/ia64/kernel/process.c Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/process.c Fri Jun 9 17:23:40 2000 @@ -98,16 +98,49 @@ if (pm_idle) (*pm_idle)(); #ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC - if (ia64_get_itm() < ia64_get_itc()) { - extern void ia64_reset_itm (void); - - printk("cpu_idle: ITM in past, resetting it...\n"); - ia64_reset_itm(); + local_irq_disable(); + { + u64 itc, itm; + + itc = ia64_get_itc(); + itm = ia64_get_itm(); + if (time_after(itc, itm)) { + extern void ia64_reset_itm (void); + + printk("cpu_idle: ITM in past (itc=%lx,itm=%lx:%lums)\n", + itc, itm, (itc - itm)/500000); + ia64_reset_itm(); + } } + local_irq_enable(); #endif } } +void +ia64_save_extra (struct task_struct *task) +{ + extern void ia64_save_debug_regs (unsigned long *save_area); + extern void ia32_save_state (struct thread_struct *thread); + + if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) + ia64_save_debug_regs(&task->thread.dbr[0]); + if (IS_IA32_PROCESS(ia64_task_regs(task))) + ia32_save_state(&task->thread); +} + +void +ia64_load_extra (struct task_struct *task) +{ + extern void ia64_load_debug_regs (unsigned long *save_area); + extern void ia32_load_state (struct thread_struct *thread); + + if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) + ia64_load_debug_regs(&task->thread.dbr[0]); + if (IS_IA32_PROCESS(ia64_task_regs(task))) + ia32_load_state(&task->thread); +} + /* * Copy the state of an ia-64 thread. 
* @@ -278,7 +311,12 @@ dst[12] = pt->r12; dst[13] = pt->r13; dst[14] = pt->r14; dst[15] = pt->r15; memcpy(dst + 16, &pt->r16, 16*8); /* r16-r31 are contiguous */ +#ifdef CONFIG_IA64_NEW_UNWIND + printk("ia64_elf_core_copy_regs: fix me, please?"); + dst[32] = 0; +#else dst[32] = ia64_get_nat_bits(pt, sw); +#endif dst[33] = pt->pr; /* branch regs: */ @@ -299,6 +337,10 @@ struct switch_stack *sw = ((struct switch_stack *) pt) - 1; struct task_struct *fpu_owner = ia64_get_fpu_owner(); +#ifdef CONFIG_IA64_NEW_UNWIND + printk("dump_fpu: fix me, please?"); +#endif + memset(dst, 0, sizeof (dst)); /* don't leak any "random" bits */ /* f0 is 0.0 */ /* f1 is 1.0 */ dst[2] = sw->f2; dst[3] = sw->f3; @@ -384,7 +426,7 @@ unsigned long get_wchan (struct task_struct *p) { - struct ia64_frame_info info; + struct unw_frame_info info; unsigned long ip; int count = 0; /* @@ -403,11 +445,11 @@ * gracefully if the process wasn't really blocked after all. * --davidm 99/12/15 */ - ia64_unwind_init_from_blocked_task(&info, p); + unw_init_from_blocked_task(&info, p); do { - if (ia64_unwind_to_previous_frame(&info) < 0) + if (unw_unwind(&info) < 0) return 0; - ip = ia64_unwind_get_ip(&info); + unw_get_ip(&info, &ip); if (ip < first_sched || ip >= last_sched) return ip; } while (count++ < 16); diff -urN linux-2.4.0-test1/arch/ia64/kernel/ptrace.c linux-2.4.0-test1-lia/arch/ia64/kernel/ptrace.c --- linux-2.4.0-test1/arch/ia64/kernel/ptrace.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/ptrace.c Fri Jun 9 17:15:55 2000 @@ -30,7 +30,91 @@ * dd (data debug fault disable; one bit) * ri (restart instruction; two bits) */ -#define CR_IPSR_CHANGE_MASK 0x06a00100003eUL +#define IPSR_WRITE_MASK 0x000006a00100003eUL +#define IPSR_READ_MASK IPSR_WRITE_MASK + +#ifdef CONFIG_IA64_NEW_UNWIND + +#define PTRACE_DEBUG 1 + +#if PTRACE_DEBUG +# define dprintk(format...) printk(format) +# define inline +#else +# define dprintk(format...) 
+#endif + +static int +unwind_to_user (struct unw_frame_info *info, struct task_struct *child) +{ + unsigned long ip; + + unw_init_from_blocked_task(info, child); + while (unw_unwind(info) >= 0) { + if (unw_get_rp(info, &ip) < 0) { + unw_get_ip(info, &ip); + dprintk("ptrace: failed to read return pointer (ip=0x%lx)\n", ip); + return -1; + } + if (ip < TASK_SIZE) + return 0; + } + unw_get_ip(info, &ip); + dprintk("ptrace: failed to unwind to user-level (ip=0x%lx)\n", ip); + return -1; +} + +/* + * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT + * bitset where bit i is set iff the NaT bit of register i is set. + */ +unsigned long +ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) +{ +# define GET_BITS(first, last, unat) \ + ({ \ + unsigned long bit = ia64_unat_pos(&pt->r##first); \ + unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \ + (ia64_rotl(unat, first) >> bit) & mask; \ + }) + unsigned long val; + + val = GET_BITS( 1, 3, scratch_unat); + val |= GET_BITS(12, 15, scratch_unat); + val |= GET_BITS( 8, 11, scratch_unat); + val |= GET_BITS(16, 31, scratch_unat); + return val; + +# undef GET_BITS +} + +/* + * Set the NaT bits for the scratch registers according to NAT and + * return the resulting unat (assuming the scratch registers are + * stored in PT). 
+ */ +unsigned long +ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat) +{ + unsigned long scratch_unat; + +# define PUT_BITS(first, last, nat) \ + ({ \ + unsigned long bit = ia64_unat_pos(&pt->r##first); \ + unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit; \ + (ia64_rotr(nat, first) << bit) & mask; \ + }) + scratch_unat = PUT_BITS( 1, 3, nat); + scratch_unat |= PUT_BITS(12, 15, nat); + scratch_unat |= PUT_BITS( 8, 11, nat); + scratch_unat |= PUT_BITS(16, 31, nat); + + return scratch_unat; + +# undef PUT_BITS +} + +#else /* !CONFIG_IA64_NEW_UNWIND */ /* * Collect the NaT bits for r1-r31 from sw->caller_unat and @@ -79,28 +163,26 @@ # undef PUT_BITS } -#define IA64_MLI_TEMPLATE 0x2 +#endif /* !CONFIG_IA64_NEW_UNWIND */ + +#define IA64_MLX_TEMPLATE 0x2 #define IA64_MOVL_OPCODE 6 void ia64_increment_ip (struct pt_regs *regs) { - unsigned long w0, w1, ri = ia64_psr(regs)->ri + 1; + unsigned long w0, ri = ia64_psr(regs)->ri + 1; if (ri > 2) { ri = 0; regs->cr_iip += 16; } else if (ri == 2) { get_user(w0, (char *) regs->cr_iip + 0); - get_user(w1, (char *) regs->cr_iip + 8); - if (((w0 >> 1) & 0xf) == IA64_MLI_TEMPLATE && (w1 >> 60) == IA64_MOVL_OPCODE) { + if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { /* - * rfi'ing to slot 2 of an MLI bundle causes + * rfi'ing to slot 2 of an MLX bundle causes * an illegal operation fault. We don't want - * that to happen... Note that we check the - * opcode only. "movl" has a vc bit of 0, but - * since a vc bit of 1 is currently reserved, - * we might just as well treat it like a movl. + * that to happen... 
*/ ri = 0; regs->cr_iip += 16; @@ -112,21 +194,17 @@ void ia64_decrement_ip (struct pt_regs *regs) { - unsigned long w0, w1, ri = ia64_psr(regs)->ri - 1; + unsigned long w0, ri = ia64_psr(regs)->ri - 1; if (ia64_psr(regs)->ri == 0) { regs->cr_iip -= 16; ri = 2; get_user(w0, (char *) regs->cr_iip + 0); - get_user(w1, (char *) regs->cr_iip + 8); - if (((w0 >> 1) & 0xf) == IA64_MLI_TEMPLATE && (w1 >> 60) == IA64_MOVL_OPCODE) { + if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { /* - * rfi'ing to slot 2 of an MLI bundle causes + * rfi'ing to slot 2 of an MLX bundle causes * an illegal operation fault. We don't want - * that to happen... Note that we check the - * opcode only. "movl" has a vc bit of 0, but - * since a vc bit of 1 is currently reserved, - * we might just as well treat it like a movl. + * that to happen... */ ri = 1; } @@ -291,7 +369,11 @@ laddr = (unsigned long *) addr; child_regs = ia64_task_regs(child); +#ifdef CONFIG_IA64_NEW_UNWIND + child_stack = (struct switch_stack *) (child->thread.ksp + 16); +#else child_stack = (struct switch_stack *) child_regs - 1; +#endif bspstore = (unsigned long *) child_regs->ar_bspstore; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore); @@ -335,7 +417,11 @@ laddr = (unsigned long *) addr; child_regs = ia64_task_regs(child); +#ifdef CONFIG_IA64_NEW_UNWIND + child_stack = (struct switch_stack *) (child->thread.ksp + 16); +#else child_stack = (struct switch_stack *) child_regs - 1; +#endif bspstore = (unsigned long *) child_regs->ar_bspstore; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore); @@ -394,21 +480,42 @@ long new_bsp, int force_loadrs_to_zero) { - unsigned long *krbs, bspstore, bsp, krbs_num_regs, rbs_end, addr, val; - long ndirty, ret; - struct pt_regs *child_regs; + unsigned long *krbs, bspstore, *kbspstore, bsp, rbs_end, addr, val; + long 
ndirty, ret = 0; + struct pt_regs *child_regs = ia64_task_regs(child); + +#ifdef CONFIG_IA64_NEW_UNWIND + struct unw_frame_info info; + unsigned long cfm, sof; + + if (unwind_to_user(&info, child) < 0) + return -1; + + unw_get_bsp(&info, (unsigned long *) &kbspstore); + + krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; + ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19)); + bspstore = child_regs->ar_bspstore; + bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty); + + cfm = child_regs->cr_ifs; + if (!(cfm & (1UL << 63))) + unw_get_cfm(&info, &cfm); + sof = (cfm & 0x7f); + rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, sof); +#else struct switch_stack *child_stack; + unsigned long krbs_num_regs; - ret = 0; - child_regs = ia64_task_regs(child); child_stack = (struct switch_stack *) child_regs - 1; - + kbspstore = (unsigned long *) child_stack->ar_bspstore; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19)); bspstore = child_regs->ar_bspstore; bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty); - krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore); + krbs_num_regs = ia64_rse_num_regs(krbs, kbspstore); rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, krbs_num_regs); +#endif /* Return early if nothing to do */ if (bsp == new_bsp) @@ -437,13 +544,15 @@ } static void -sync_thread_rbs (struct task_struct *child, int make_writable) +sync_thread_rbs (struct task_struct *child, struct mm_struct *mm, int make_writable) { struct task_struct *p; read_lock(&tasklist_lock); - for_each_task(p) { - if (p->mm == child->mm && p->state != TASK_RUNNING) - sync_kernel_register_backing_store(p, 0, make_writable); + { + for_each_task(p) { + if (p->mm == mm && p->state != TASK_RUNNING) + sync_kernel_register_backing_store(p, 0, make_writable); + } } read_unlock(&tasklist_lock); child->thread.flags |= IA64_THREAD_KRBS_SYNCED; @@ -465,15 +574,380 @@ 
} } +#ifdef CONFIG_IA64_NEW_UNWIND + +#include + +static int +access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access) +{ + struct ia64_fpreg fpval; + int ret; + + ret = unw_get_fr(info, regnum, &fpval); + if (ret < 0) + return ret; + + if (write_access) { + fpval.u.bits[hi] = *data; + ret = unw_set_fr(info, regnum, fpval); + } else + *data = fpval.u.bits[hi]; + return ret; +} + +static int +access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access) +{ + unsigned long *ptr, *rbs, *bspstore, ndirty, regnum; + struct switch_stack *sw; + struct unw_frame_info info; + struct pt_regs *pt; + + pt = ia64_task_regs(child); + sw = (struct switch_stack *) (child->thread.ksp + 16); + + if ((addr & 0x7) != 0) { + dprintk("ptrace: unaligned register address 0x%lx\n", addr); + return -1; + } + + if (addr < PT_F127 + 16) { + /* accessing fph */ + sync_fph(child); + ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr); + } else if (addr >= PT_F10 && addr < PT_F15 + 16) { + /* scratch registers untouched by kernel (saved in switch_stack) */ + ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS); + } else if (addr < PT_AR_LC + 8) { + /* preserved state: */ + unsigned long nat_bits, scratch_unat, dummy = 0; + struct unw_frame_info info; + char nat = 0; + int ret; + + if (unwind_to_user(&info, child) < 0) + return -1; + + switch (addr) { + case PT_NAT_BITS: + if (write_access) { + nat_bits = *data; + scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits); + if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) { + dprintk("ptrace: failed to set ar.unat\n"); + return -1; + } + for (regnum = 4; regnum <= 7; ++regnum) { + unw_get_gr(&info, regnum, &dummy, &nat); + unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1); + } + } else { + if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) { + dprintk("ptrace: failed to read ar.unat\n"); + return -1; + } + nat_bits = 
ia64_get_scratch_nat_bits(pt, scratch_unat); + for (regnum = 4; regnum <= 7; ++regnum) { + unw_get_gr(&info, regnum, &dummy, &nat); + nat_bits |= (nat != 0) << regnum; + } + *data = nat_bits; + } + return 0; + + case PT_R4: case PT_R5: case PT_R6: case PT_R7: + if (write_access) { + /* read NaT bit first: */ + ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat); + if (ret < 0) + return ret; + } + return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat, + write_access); + + case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5: + return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access); + + case PT_AR_LC: + return unw_access_ar(&info, UNW_AR_LC, data, write_access); + + default: + if (addr >= PT_F2 && addr < PT_F5 + 16) + return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0, + data, write_access); + else if (addr >= PT_F16 && addr < PT_F31 + 16) + return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0, + data, write_access); + else { + dprintk("ptrace: rejecting access to register address 0x%lx\n", + addr); + return -1; + } + } + } else if (addr < PT_F9+16) { + /* scratch state */ + switch (addr) { + case PT_AR_BSP: + if (write_access) + /* FIXME? Account for lack of ``cover'' in the syscall case */ + return sync_kernel_register_backing_store(child, *data, 1); + else { + rbs = (unsigned long *) child + IA64_RBS_OFFSET/8; + bspstore = (unsigned long *) pt->ar_bspstore; + ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19)); + + /* + * If we're in a system call, no ``cover'' was done. So to + * make things uniform, we'll add the appropriate displacement + * onto bsp if we're in a system call. 
+ */ + if (!(pt->cr_ifs & (1UL << 63))) { + struct unw_frame_info info; + unsigned long cfm; + + if (unwind_to_user(&info, child) < 0) + return -1; + + unw_get_cfm(&info, &cfm); + ndirty += cfm & 0x7f; + } + *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); + return 0; + } + + case PT_CFM: + if (pt->cr_ifs & (1UL << 63)) { + if (write_access) + pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL) + | (*data & 0x3fffffffffUL)); + else + *data = pt->cr_ifs & 0x3fffffffffUL; + } else { + /* kernel was entered through a system call */ + unsigned long cfm; + + if (unwind_to_user(&info, child) < 0) + return -1; + + unw_get_cfm(&info, &cfm); + if (write_access) + unw_set_cfm(&info, ((cfm & ~0x3fffffffffU) + | (*data & 0x3fffffffffUL))); + else + *data = cfm; + } + return 0; + + case PT_CR_IPSR: + if (write_access) + pt->cr_ipsr = ((*data & IPSR_WRITE_MASK) + | (pt->cr_ipsr & ~IPSR_WRITE_MASK)); + else + *data = (pt->cr_ipsr & IPSR_READ_MASK); + return 0; + + case PT_R1: case PT_R2: case PT_R3: + case PT_R8: case PT_R9: case PT_R10: case PT_R11: + case PT_R12: case PT_R13: case PT_R14: case PT_R15: + case PT_R16: case PT_R17: case PT_R18: case PT_R19: + case PT_R20: case PT_R21: case PT_R22: case PT_R23: + case PT_R24: case PT_R25: case PT_R26: case PT_R27: + case PT_R28: case PT_R29: case PT_R30: case PT_R31: + case PT_B0: case PT_B6: case PT_B7: + case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8: + case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8: + case PT_AR_BSPSTORE: + case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT: + case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR: + /* scratch register */ + ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR); + break; + + default: + /* disallow accessing anything else... 
*/ + dprintk("ptrace: rejecting access to register address 0x%lx\n", + addr); + return -1; + } + } else { + /* access debug registers */ + + if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { + child->thread.flags |= IA64_THREAD_DBG_VALID; + memset(child->thread.dbr, 0, sizeof(child->thread.dbr)); + memset(child->thread.ibr, 0, sizeof( child->thread.ibr)); + } + if (addr >= PT_IBR) { + regnum = (addr - PT_IBR) >> 3; + ptr = &child->thread.ibr[0]; + } else { + regnum = (addr - PT_DBR) >> 3; + ptr = &child->thread.dbr[0]; + } + + if (regnum >= 8) { + dprintk("ptrace: rejecting access to register address 0x%lx\n", addr); + return -1; + } + + ptr += regnum; + } + if (write_access) + *ptr = *data; + else + *data = *ptr; + return 0; +} + +#else /* !CONFIG_IA64_NEW_UNWIND */ + +static int +access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access) +{ + unsigned long *ptr, *rbs, *bspstore, ndirty, regnum; + struct switch_stack *sw; + struct pt_regs *pt; + + if ((addr & 0x7) != 0) + return -1; + + if (addr < PT_F127+16) { + /* accessing fph */ + sync_fph(child); + ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr); + } else if (addr < PT_F9+16) { + /* accessing switch_stack or pt_regs: */ + pt = ia64_task_regs(child); + sw = (struct switch_stack *) pt - 1; + + switch (addr) { + case PT_NAT_BITS: + if (write_access) + ia64_put_nat_bits(pt, sw, *data); + else + *data = ia64_get_nat_bits(pt, sw); + return 0; + + case PT_AR_BSP: + if (write_access) + /* FIXME? Account for lack of ``cover'' in the syscall case */ + return sync_kernel_register_backing_store(child, *data, 1); + else { + rbs = (unsigned long *) child + IA64_RBS_OFFSET/8; + bspstore = (unsigned long *) pt->ar_bspstore; + ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19)); + + /* + * If we're in a system call, no ``cover'' was done. So to + * make things uniform, we'll add the appropriate displacement + * onto bsp if we're in a system call. 
+ */ + if (!(pt->cr_ifs & (1UL << 63))) + ndirty += sw->ar_pfs & 0x7f; + *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); + return 0; + } + + case PT_CFM: + if (write_access) { + if (pt->cr_ifs & (1UL << 63)) + pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL) + | (*data & 0x3fffffffffUL)); + else + sw->ar_pfs = ((sw->ar_pfs & ~0x3fffffffffUL) + | (*data & 0x3fffffffffUL)); + return 0; + } else { + if ((pt->cr_ifs & (1UL << 63)) == 0) + *data = sw->ar_pfs; + else + /* return only the CFM */ + *data = pt->cr_ifs & 0x3fffffffffUL; + return 0; + } + + case PT_CR_IPSR: + if (write_access) + pt->cr_ipsr = ((*data & IPSR_WRITE_MASK) + | (pt->cr_ipsr & ~IPSR_WRITE_MASK)); + else + *data = (pt->cr_ipsr & IPSR_READ_MASK); + return 0; + + case PT_R1: case PT_R2: case PT_R3: + case PT_R4: case PT_R5: case PT_R6: case PT_R7: + case PT_R8: case PT_R9: case PT_R10: case PT_R11: + case PT_R12: case PT_R13: case PT_R14: case PT_R15: + case PT_R16: case PT_R17: case PT_R18: case PT_R19: + case PT_R20: case PT_R21: case PT_R22: case PT_R23: + case PT_R24: case PT_R25: case PT_R26: case PT_R27: + case PT_R28: case PT_R29: case PT_R30: case PT_R31: + case PT_B0: case PT_B1: case PT_B2: case PT_B3: + case PT_B4: case PT_B5: case PT_B6: case PT_B7: + case PT_F2: case PT_F2+8: case PT_F3: case PT_F3+8: + case PT_F4: case PT_F4+8: case PT_F5: case PT_F5+8: + case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8: + case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8: + case PT_F10: case PT_F10+8: case PT_F11: case PT_F11+8: + case PT_F12: case PT_F12+8: case PT_F13: case PT_F13+8: + case PT_F14: case PT_F14+8: case PT_F15: case PT_F15+8: + case PT_F16: case PT_F16+8: case PT_F17: case PT_F17+8: + case PT_F18: case PT_F18+8: case PT_F19: case PT_F19+8: + case PT_F20: case PT_F20+8: case PT_F21: case PT_F21+8: + case PT_F22: case PT_F22+8: case PT_F23: case PT_F23+8: + case PT_F24: case PT_F24+8: case PT_F25: case PT_F25+8: + case PT_F26: case PT_F26+8: case PT_F27: case PT_F27+8: + case 
PT_F28: case PT_F28+8: case PT_F29: case PT_F29+8: + case PT_F30: case PT_F30+8: case PT_F31: case PT_F31+8: + case PT_AR_BSPSTORE: + case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT: + case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR: + case PT_AR_LC: + ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS); + break; + + default: + /* disallow accessing anything else... */ + return -1; + } + } else { + /* access debug registers */ + + if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { + child->thread.flags |= IA64_THREAD_DBG_VALID; + memset(child->thread.dbr, 0, sizeof child->thread.dbr); + memset(child->thread.ibr, 0, sizeof child->thread.ibr); + } + if (addr >= PT_IBR) { + regnum = (addr - PT_IBR) >> 3; + ptr = &child->thread.ibr[0]; + } else { + regnum = (addr - PT_DBR) >> 3; + ptr = &child->thread.dbr[0]; + } + + if (regnum >= 8) + return -1; + + ptr += regnum; + } + if (write_access) + *ptr = *data; + else + *data = *ptr; + return 0; +} + +#endif /* !CONFIG_IA64_NEW_UNWIND */ + asmlinkage long sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data, long arg4, long arg5, long arg6, long arg7, long stack) { struct pt_regs *regs = (struct pt_regs *) &stack; - struct switch_stack *child_stack; - struct pt_regs *child_regs; struct task_struct *child; - unsigned long flags, regnum, *base; + unsigned long flags; long ret; lock_kernel(); @@ -489,17 +963,21 @@ ret = -ESRCH; read_lock(&tasklist_lock); - child = find_task_by_pid(pid); + { + child = find_task_by_pid(pid); + if (child) + get_task_struct(child); + } read_unlock(&tasklist_lock); if (!child) goto out; ret = -EPERM; if (pid == 1) /* no messing around with init! 
*/ - goto out; + goto out_tsk; if (request == PTRACE_ATTACH) { if (child == current) - goto out; + goto out_tsk; if ((!child->dumpable || (current->uid != child->euid) || (current->uid != child->suid) || @@ -508,10 +986,10 @@ (current->gid != child->sgid) || (!cap_issubset(child->cap_permitted, current->cap_permitted)) || (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE)) - goto out; + goto out_tsk; /* the same process cannot be attached many times */ if (child->flags & PF_PTRACED) - goto out; + goto out_tsk; child->flags |= PF_PTRACED; if (child->p_pptr != current) { unsigned long flags; @@ -524,199 +1002,98 @@ } send_sig(SIGSTOP, child, 1); ret = 0; - goto out; + goto out_tsk; } ret = -ESRCH; if (!(child->flags & PF_PTRACED)) - goto out; + goto out_tsk; if (child->state != TASK_STOPPED) { if (request != PTRACE_KILL) - goto out; + goto out_tsk; } if (child->p_pptr != current) - goto out; + goto out_tsk; switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: /* read word at location addr */ - if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED) - && atomic_read(&child->mm->mm_users) > 1) - sync_thread_rbs(child, 0); + if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) { + struct mm_struct *mm; + long do_sync; + + task_lock(child); + { + mm = child->mm; + do_sync = mm && (atomic_read(&mm->mm_users) > 1); + } + task_unlock(child); + if (do_sync) + sync_thread_rbs(child, mm, 0); + } ret = ia64_peek(regs, child, addr, &data); if (ret == 0) { ret = data; regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */ } - goto out; + goto out_tsk; case PTRACE_POKETEXT: case PTRACE_POKEDATA: /* write the word at location addr */ - if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED) - && atomic_read(&child->mm->mm_users) > 1) - sync_thread_rbs(child, 1); + if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) { + struct mm_struct *mm; + long do_sync; + + task_lock(child); + { + mm = child->mm; + do_sync = mm && (atomic_read(&child->mm->mm_users) > 1); 
+ } + task_unlock(child); + if (do_sync) + sync_thread_rbs(child, mm, 1); + } ret = ia64_poke(regs, child, addr, data); - goto out; + goto out_tsk; case PTRACE_PEEKUSR: /* read the word at addr in the USER area */ - ret = -EIO; - if ((addr & 0x7) != 0) - goto out; - - if (addr < PT_CALLER_UNAT) { - /* accessing fph */ - sync_fph(child); - addr += (unsigned long) &child->thread.fph; - ret = *(unsigned long *) addr; - } else if (addr < PT_F9+16) { - /* accessing switch_stack or pt_regs: */ - child_regs = ia64_task_regs(child); - child_stack = (struct switch_stack *) child_regs - 1; - ret = *(unsigned long *) ((long) child_stack + addr - PT_CALLER_UNAT); - - if (addr == PT_AR_BSP) { - /* ret currently contains pt_regs.loadrs */ - unsigned long *rbs, *bspstore, ndirty; - - rbs = (unsigned long *) child + IA64_RBS_OFFSET/8; - bspstore = (unsigned long *) child_regs->ar_bspstore; - ndirty = ia64_rse_num_regs(rbs, rbs + (ret >> 19)); - ret = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); - - /* - * If we're in a system call, no ``cover'' was done. So - * to make things uniform, we'll add the appropriate - * displacement onto bsp if we're in a system call. - * - * Note: It may be better to leave the system call case - * alone and subtract the amount of the cover for the - * non-syscall case. That way the reported bsp value - * would actually be the correct bsp for the child - * process. 
- */ - if (!(child_regs->cr_ifs & (1UL << 63))) { - ret = (unsigned long) - ia64_rse_skip_regs((unsigned long *) ret, - child_stack->ar_pfs & 0x7f); - } - } else if (addr == PT_CFM) { - /* ret currently contains pt_regs.cr_ifs */ - if ((ret & (1UL << 63)) == 0) - ret = child_stack->ar_pfs; - ret &= 0x3fffffffffUL; /* return only the CFM */ - } - } else { - if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { - child->thread.flags |= IA64_THREAD_DBG_VALID; - memset(child->thread.dbr, 0, sizeof child->thread.dbr); - memset(child->thread.ibr, 0, sizeof child->thread.ibr); - } - if (addr >= PT_IBR) { - regnum = (addr - PT_IBR) >> 3; - base = &child->thread.ibr[0]; - } else { - regnum = (addr - PT_DBR) >> 3; - base = &child->thread.dbr[0]; - } - if (regnum >= 8) - goto out; - ret = base[regnum]; + if (access_uarea(child, addr, &data, 0) < 0) { + ret = -EIO; + goto out_tsk; } + ret = data; regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */ - goto out; + goto out_tsk; case PTRACE_POKEUSR: /* write the word at addr in the USER area */ - ret = -EIO; - if ((addr & 0x7) != 0) - goto out; - - if (addr < PT_CALLER_UNAT) { - /* accessing fph */ - sync_fph(child); - addr += (unsigned long) &child->thread.fph; - *(unsigned long *) addr = data; - } else if (addr == PT_AR_BSPSTORE || addr == PT_CALLER_UNAT - || addr == PT_KERNEL_FPSR || addr == PT_K_B0 || addr == PT_K_AR_PFS - || (PT_K_AR_UNAT <= addr && addr <= PT_K_PR)) { - /* - * Don't permit changes to certain registers. - * - * We don't allow bspstore to be modified because doing - * so would mess up any modifications to bsp. (See - * sync_kernel_register_backing_store for the details.) - */ - goto out; - } else if (addr == PT_AR_BSP) { - /* FIXME? 
Account for lack of ``cover'' in the syscall case */ - ret = sync_kernel_register_backing_store(child, data, 1); - goto out; - } else if (addr == PT_CFM) { - child_regs = ia64_task_regs(child); - child_stack = (struct switch_stack *) child_regs - 1; - - if (child_regs->cr_ifs & (1UL << 63)) { - child_regs->cr_ifs = (child_regs->cr_ifs & ~0x3fffffffffUL) - | (data & 0x3fffffffffUL); - } else { - child_stack->ar_pfs = (child_stack->ar_pfs & ~0x3fffffffffUL) - | (data & 0x3fffffffffUL); - } - } else if (addr < PT_F9+16) { - /* accessing switch_stack or pt_regs */ - child_regs = ia64_task_regs(child); - child_stack = (struct switch_stack *) child_regs - 1; - - if (addr == PT_CR_IPSR) - data = (data & CR_IPSR_CHANGE_MASK) - | (child_regs->cr_ipsr & ~CR_IPSR_CHANGE_MASK); - - *(unsigned long *) ((long) child_stack + addr - PT_CALLER_UNAT) = data; - } else { - if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { - child->thread.flags |= IA64_THREAD_DBG_VALID; - memset(child->thread.dbr, 0, sizeof child->thread.dbr); - memset(child->thread.ibr, 0, sizeof child->thread.ibr); - } - - if (addr >= PT_IBR) { - regnum = (addr - PT_IBR) >> 3; - base = &child->thread.ibr[0]; - } else { - regnum = (addr - PT_DBR) >> 3; - base = &child->thread.dbr[0]; - } - if (regnum >= 8) - goto out; - if (regnum & 1) { - /* force breakpoint to be effective only for user-level: */ - data &= ~(0x7UL << 56); - } - base[regnum] = data; + if (access_uarea(child, addr, &data, 1) < 0) { + ret = -EIO; + goto out_tsk; } ret = 0; - goto out; + goto out_tsk; case PTRACE_GETSIGINFO: ret = -EIO; if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t)) || child->thread.siginfo == 0) - goto out; + goto out_tsk; copy_to_user((siginfo_t *) data, child->thread.siginfo, sizeof (siginfo_t)); ret = 0; - goto out; + goto out_tsk; break; case PTRACE_SETSIGINFO: ret = -EIO; if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t)) || child->thread.siginfo == 0) - goto out; + goto out_tsk; 
copy_from_user(child->thread.siginfo, (siginfo_t *) data, sizeof (siginfo_t)); ret = 0; - goto out; + goto out_tsk; case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: /* restart after signal. */ ret = -EIO; if (data > _NSIG) - goto out; + goto out_tsk; if (request == PTRACE_SYSCALL) child->flags |= PF_TRACESYS; else @@ -732,7 +1109,7 @@ wake_up_process(child); ret = 0; - goto out; + goto out_tsk; case PTRACE_KILL: /* @@ -741,7 +1118,7 @@ * that it wants to exit. */ if (child->state == TASK_ZOMBIE) /* already dead */ - goto out; + goto out_tsk; child->exit_code = SIGKILL; /* make sure the single step/take-branch tra bits are not set: */ @@ -753,13 +1130,13 @@ wake_up_process(child); ret = 0; - goto out; + goto out_tsk; case PTRACE_SINGLESTEP: /* let child execute for one instruction */ case PTRACE_SINGLEBLOCK: ret = -EIO; if (data > _NSIG) - goto out; + goto out_tsk; child->flags &= ~PF_TRACESYS; if (request == PTRACE_SINGLESTEP) { @@ -775,12 +1152,12 @@ /* give it a chance to run. */ wake_up_process(child); ret = 0; - goto out; + goto out_tsk; case PTRACE_DETACH: /* detach a process that was attached. 
*/ ret = -EIO; if (data > _NSIG) - goto out; + goto out_tsk; child->flags &= ~(PF_PTRACED|PF_TRACESYS); child->exit_code = data; @@ -799,12 +1176,14 @@ wake_up_process(child); ret = 0; - goto out; + goto out_tsk; default: ret = -EIO; - goto out; + goto out_tsk; } + out_tsk: + free_task_struct(child); out: unlock_kernel(); return ret; diff -urN linux-2.4.0-test1/arch/ia64/kernel/sal_stub.S linux-2.4.0-test1-lia/arch/ia64/kernel/sal_stub.S --- linux-2.4.0-test1/arch/ia64/kernel/sal_stub.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/sal_stub.S Wed Dec 31 16:00:00 1969 @@ -1,118 +0,0 @@ -/* - * Copyright (C) 1998-2000 Hewlett-Packard Co - * Copyright (C) 1998-2000 David Mosberger-Tang - */ -#ifndef __GCC_MULTIREG_RETVALS__ - /* - * gcc currently does not conform to the ia-64 calling - * convention as far as returning function values are - * concerned. Instead of returning values up to 32 bytes in - * size in r8-r11, gcc returns any value bigger than a - * doubleword via a structure that's allocated by the caller - * and whose address is passed into the function. Since - * SAL_PROC returns values according to the calling - * convention, this stub takes care of copying r8-r11 to the - * place where gcc expects them. - */ - .text - .psr abi64 - .psr lsb - .lsb - - .align 16 - .global ia64_sal_stub -ia64_sal_stub: - /* - * Sheesh, the Cygnus backend passes the pointer to a return value structure in - * in0 whereas the HP backend passes it in r8. Don't you hate those little - * differences... 
- */ -#ifdef GCC_RETVAL_POINTER_IN_R8 - adds r2=-24,sp - adds sp=-48,sp - mov r14=rp - ;; - st8 [r2]=r8,8 // save pointer to return value - addl r3=@ltoff(ia64_sal),gp - ;; - ld8 r3=[r3] - st8 [r2]=gp,8 // save global pointer - ;; - ld8 r3=[r3] // fetch the value of ia64_sal - st8 [r2]=r14 // save return pointer - ;; - ld8 r2=[r3],8 // load function's entry point - ;; - ld8 gp=[r3] // load function's global pointer - ;; - mov b6=r2 - br.call.sptk.few rp=b6 -.ret0: adds r2=24,sp - ;; - ld8 r3=[r2],8 // restore pointer to return value - ;; - ld8 gp=[r2],8 // restore global pointer - st8 [r3]=r8,8 - ;; - ld8 r14=[r2] // restore return pointer - st8 [r3]=r9,8 - ;; - mov rp=r14 - st8 [r3]=r10,8 - ;; - st8 [r3]=r11,8 - adds sp=48,sp - br.sptk.few rp -#else - /* - * On input: - * in0 = pointer to return value structure - * in1 = index of SAL function to call - * in2..inN = remaining args to SAL call - */ - /* - * We allocate one input and eight output register such that the br.call instruction - * will rename in1-in7 to in0-in6---exactly what we want because SAL doesn't want to - * see the pointer to the return value structure. - */ - alloc r15=ar.pfs,1,0,8,0 - - adds r2=-24,sp - adds sp=-48,sp - mov r14=rp - ;; - st8 [r2]=r15,8 // save ar.pfs - addl r3=@ltoff(ia64_sal),gp - ;; - ld8 r3=[r3] // get address of ia64_sal - st8 [r2]=gp,8 // save global pointer - ;; - ld8 r3=[r3] // get value of ia64_sal - st8 [r2]=r14,8 // save return address (rp) - ;; - ld8 r2=[r3],8 // load function's entry point - ;; - ld8 gp=[r3] // load function's global pointer - mov b6=r2 - br.call.sptk.few rp=b6 // make SAL call -.ret0: adds r2=24,sp - ;; - ld8 r15=[r2],8 // restore ar.pfs - ;; - ld8 gp=[r2],8 // restore global pointer - st8 [in0]=r8,8 // store 1. dword of return value - ;; - ld8 r14=[r2] // restore return address (rp) - st8 [in0]=r9,8 // store 2. dword of return value - ;; - mov rp=r14 - st8 [in0]=r10,8 // store 3. 
dword of return value - ;; - st8 [in0]=r11,8 - adds sp=48,sp // pop stack frame - mov ar.pfs=r15 - br.ret.sptk.few rp -#endif - - .endp ia64_sal_stub -#endif /* __GCC_MULTIREG_RETVALS__ */ diff -urN linux-2.4.0-test1/arch/ia64/kernel/setup.c linux-2.4.0-test1-lia/arch/ia64/kernel/setup.c --- linux-2.4.0-test1/arch/ia64/kernel/setup.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/setup.c Fri Jun 9 17:16:12 2000 @@ -36,6 +36,10 @@ #include #include +#ifdef CONFIG_BLK_DEV_RAM +# include +#endif + extern char _end; /* cpu_data[bootstrap_processor] is data for the bootstrap processor: */ @@ -108,6 +112,8 @@ { unsigned long max_pfn, bootmap_start, bootmap_size; + unw_init(); + /* * The secondary bootstrap loader passes us the boot * parameters at the beginning of the ZERO_PAGE, so let's @@ -125,11 +131,22 @@ * change APIs, they'd do things for the better. Grumble... */ bootmap_start = PAGE_ALIGN(__pa(&_end)); + if (ia64_boot_param.initrd_size) + bootmap_start = PAGE_ALIGN(bootmap_start + ia64_boot_param.initrd_size); bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn); efi_memmap_walk(free_available_memory, 0); reserve_bootmem(bootmap_start, bootmap_size); +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = ia64_boot_param.initrd_start; + if (initrd_start) { + initrd_end = initrd_start+ia64_boot_param.initrd_size; + printk("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *) initrd_start, ia64_boot_param.initrd_size); + reserve_bootmem(virt_to_phys(initrd_start), ia64_boot_param.initrd_size); + } +#endif #if 0 /* XXX fix me */ init_mm.start_code = (unsigned long) &_stext; @@ -155,10 +172,8 @@ #ifdef CONFIG_SMP bootstrap_processor = hard_smp_processor_id(); current->processor = bootstrap_processor; -#else - cpu_init(); - identify_cpu(&cpu_data[0]); #endif + cpu_init(); /* initialize the bootstrap CPU */ if (efi.acpi) { /* Parse the ACPI tables */ @@ -270,35 +285,18 @@ u64 features; } field; } cpuid; + pal_vm_info_1_u_t vm1; + 
pal_vm_info_2_u_t vm2; + pal_status_t status; + unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ int i; - for (i = 0; i < 5; ++i) { + for (i = 0; i < 5; ++i) cpuid.bits[i] = ia64_get_cpuid(i); - } -#ifdef CONFIG_SMP - /* - * XXX Instead of copying the ITC info from the bootstrap - * processor, ia64_init_itm() should be done per CPU. That - * should get you the right info. --davidm 1/24/00 - */ - if (c != &cpu_data[bootstrap_processor]) { - memset(c, 0, sizeof(struct cpuinfo_ia64)); - c->proc_freq = cpu_data[bootstrap_processor].proc_freq; - c->itc_freq = cpu_data[bootstrap_processor].itc_freq; - c->cyc_per_usec = cpu_data[bootstrap_processor].cyc_per_usec; - c->usec_per_cyc = cpu_data[bootstrap_processor].usec_per_cyc; - } -#else memset(c, 0, sizeof(struct cpuinfo_ia64)); -#endif memcpy(c->vendor, cpuid.field.vendor, 16); -#ifdef CONFIG_IA64_SOFTSDV_HACKS - /* BUG: SoftSDV doesn't support the cpuid registers. */ - if (c->vendor[0] == '\0') - memcpy(c->vendor, "Intel", 6); -#endif c->ppn = cpuid.field.ppn; c->number = cpuid.field.number; c->revision = cpuid.field.revision; @@ -306,8 +304,29 @@ c->family = cpuid.field.family; c->archrev = cpuid.field.archrev; c->features = cpuid.field.features; -#ifdef CONFIG_SMP - c->loops_per_sec = loops_per_sec; + + status = ia64_pal_vm_summary(&vm1, &vm2); + if (status == PAL_STATUS_SUCCESS) { +#if 1 + /* + * XXX the current PAL code returns IMPL_VA_MSB==60, which is dead-wrong. + * --davidm 00/05/26 + s*/ + impl_va_msb = 50; +#else + impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; +#endif + phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; + } + printk("processor implements %lu virtual and %lu physical address bits\n", + impl_va_msb + 1, phys_addr_size); + c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); + c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); + +#ifdef CONFIG_IA64_SOFTSDV_HACKS + /* BUG: SoftSDV doesn't support the cpuid registers. 
*/ + if (c->vendor[0] == '\0') + memcpy(c->vendor, "Intel", 6); #endif } @@ -318,6 +337,11 @@ void cpu_init (void) { + extern void __init ia64_rid_init (void); + extern void __init ia64_tlb_init (void); + + identify_cpu(&my_cpu_data); + /* Clear the stack memory reserved for pt_regs: */ memset(ia64_task_regs(current), 0, sizeof(struct pt_regs)); @@ -331,6 +355,14 @@ */ ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP); ia64_set_fpu_owner(0); /* initialize ar.k5 */ + atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; + + ia64_rid_init(); + ia64_tlb_init(); + +#ifdef CONFIG_SMP + normal_xtp(); +#endif } diff -urN linux-2.4.0-test1/arch/ia64/kernel/signal.c linux-2.4.0-test1-lia/arch/ia64/kernel/signal.c --- linux-2.4.0-test1/arch/ia64/kernel/signal.c Wed May 24 18:38:26 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/signal.c Fri Jun 9 17:16:43 2000 @@ -37,16 +37,26 @@ # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif +struct sigscratch { +#ifdef CONFIG_IA64_NEW_UNWIND + unsigned long scratch_unat; /* ar.unat for the general registers saved in pt */ + unsigned long pad; +#else + struct switch_stack sw; +#endif + struct pt_regs pt; +}; + struct sigframe { struct siginfo info; struct sigcontext sc; }; extern long sys_wait4 (int, int *, int, struct rusage *); -extern long ia64_do_signal (sigset_t *, struct pt_regs *, long); /* forward decl */ +extern long ia64_do_signal (sigset_t *, struct sigscratch *, long); /* forward decl */ long -ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct pt_regs *pt) +ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr) { sigset_t oldset, set; @@ -71,12 +81,19 @@ * pre-set the correct error code here to ensure that the right values * get saved in sigcontext by ia64_do_signal. 
*/ - pt->r8 = EINTR; - pt->r10 = -1; +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(&scr->pt)) { + scr->pt.r8 = -EINTR; + } else +#endif + { + scr->pt.r8 = EINTR; + scr->pt.r10 = -1; + } while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); - if (ia64_do_signal(&oldset, pt, 1)) + if (ia64_do_signal(&oldset, scr, 1)) return -EINTR; } } @@ -91,9 +108,8 @@ } static long -restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt) +restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr) { - struct switch_stack *sw = (struct switch_stack *) pt - 1; unsigned long ip, flags, nat, um, cfm; long err; @@ -104,28 +120,32 @@ err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */ err |= __get_user(cfm, &sc->sc_cfm); err |= __get_user(um, &sc->sc_um); /* user mask */ - err |= __get_user(pt->ar_rsc, &sc->sc_ar_rsc); - err |= __get_user(pt->ar_ccv, &sc->sc_ar_ccv); - err |= __get_user(pt->ar_unat, &sc->sc_ar_unat); - err |= __get_user(pt->ar_fpsr, &sc->sc_ar_fpsr); - err |= __get_user(pt->ar_pfs, &sc->sc_ar_pfs); - err |= __get_user(pt->pr, &sc->sc_pr); /* predicates */ - err |= __get_user(pt->b0, &sc->sc_br[0]); /* b0 (rp) */ - err |= __get_user(pt->b6, &sc->sc_br[6]); /* b6 */ - err |= __get_user(pt->b7, &sc->sc_br[7]); /* b7 */ - err |= __copy_from_user(&pt->r1, &sc->sc_gr[1], 3*8); /* r1-r3 */ - err |= __copy_from_user(&pt->r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ - err |= __copy_from_user(&pt->r12, &sc->sc_gr[12], 4*8); /* r12-r15 */ - err |= __copy_from_user(&pt->r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ + err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); + err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); + err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat); + err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); + err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); + err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */ + err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ + err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ 
+ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ + err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 3*8); /* r1-r3 */ + err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ + err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 4*8); /* r12-r15 */ + err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ - pt->cr_ifs = cfm | (1UL << 63); + scr->pt.cr_ifs = cfm | (1UL << 63); /* establish new instruction pointer: */ - pt->cr_iip = ip & ~0x3UL; - ia64_psr(pt)->ri = ip & 0x3; - pt->cr_ipsr = (pt->cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); - - ia64_put_nat_bits (pt, sw, nat); /* restore the original scratch NaT bits */ + scr->pt.cr_iip = ip & ~0x3UL; + ia64_psr(&scr->pt)->ri = ip & 0x3; + scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); + +#ifdef CONFIG_IA64_NEW_UNWIND + scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat); +#else + ia64_put_nat_bits(&scr->pt, &scr->sw, nat); /* restore the original scratch NaT bits */ +#endif if (flags & IA64_SC_FLAG_FPH_VALID) { struct task_struct *fpu_owner = ia64_get_fpu_owner(); @@ -138,7 +158,8 @@ return err; } -int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from) +int +copy_siginfo_to_user (siginfo_t *to, siginfo_t *from) { if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; @@ -147,43 +168,39 @@ else { int err; - /* If you change siginfo_t structure, please be sure - this code is fixed accordingly. - It should never copy any pad contained in the structure - to avoid security leaks, but must copy the generic - 3 ints plus the relevant union member. */ + /* + * If you change siginfo_t structure, please be sure + * this code is fixed accordingly. It should never + * copy any pad contained in the structure to avoid + * security leaks, but must copy the generic 3 ints + * plus the relevant union member. 
+ */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code >> 16) { - case __SI_FAULT >> 16: - case __SI_POLL >> 16: + case __SI_FAULT >> 16: + err |= __put_user(from->si_isr, &to->si_isr); + case __SI_POLL >> 16: err |= __put_user(from->si_addr, &to->si_addr); err |= __put_user(from->si_imm, &to->si_imm); break; - case __SI_CHLD >> 16: + case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); - default: + default: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); break; - /* case __SI_RT: This is not generated by the kernel as of now. */ + /* case __SI_RT: This is not generated by the kernel as of now. */ } return err; } } -/* - * When we get here, ((struct switch_stack *) pt - 1) is a - * switch_stack frame that has no defined value. Upon return, we - * expect sw->caller_unat to contain the new unat value. The reason - * we use a full switch_stack frame is so everything is symmetric - * with ia64_do_signal(). - */ long -ia64_rt_sigreturn (struct pt_regs *pt) +ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext *sc; @@ -191,7 +208,7 @@ sigset_t set; long retval; - sc = &((struct sigframe *) (pt->r12 + 16))->sc; + sc = &((struct sigframe *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and @@ -200,9 +217,15 @@ * must not touch r8 or r10 as otherwise user-level stat could * be corrupted. 
*/ - retval = (long) &ia64_leave_kernel | 1; - if ((current->flags & PF_TRACESYS) - && (sc->sc_flags & IA64_SC_FLAG_IN_SYSCALL)) + retval = (long) &ia64_leave_kernel; + if (current->flags & PF_TRACESYS) + /* + * strace expects to be notified after sigreturn + * returns even though the context to which we return + * may not be in the middle of a syscall. Thus, the + * return-value that strace displays for sigreturn is + * meaningless. + */ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) @@ -217,18 +240,18 @@ recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); - if (restore_sigcontext(sc, pt)) + if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", - current->comm, current->pid, pt->r12, pt->cr_iip); + current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif /* * It is more difficult to avoid calling this function than to * call it and ignore errors. */ - do_sigaltstack(&sc->sc_stack, 0, pt->r12); + do_sigaltstack(&sc->sc_stack, 0, scr->pt.r12); return retval; give_sigsegv: @@ -249,14 +272,13 @@ * trampoline starts. Everything else is done at the user-level. */ static long -setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct pt_regs *pt) +setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr) { - struct switch_stack *sw = (struct switch_stack *) pt - 1; struct task_struct *fpu_owner = ia64_get_fpu_owner(); unsigned long flags = 0, ifs, nat; long err; - ifs = pt->cr_ifs; + ifs = scr->pt.cr_ifs; if (on_sig_stack((unsigned long) sc)) flags |= IA64_SC_FLAG_ONSTACK; @@ -276,46 +298,49 @@ * Note: sw->ar_unat is UNDEFINED unless the process is being * PTRACED. However, this is OK because the NaT bits of the * preserved registers (r4-r7) are never being looked at by - * the signal handler (register r4-r7 are used instead). + * the signal handler (registers r4-r7 are used instead). 
*/ - nat = ia64_get_nat_bits(pt, sw); +#ifdef CONFIG_IA64_NEW_UNWIND + nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); +#else + nat = ia64_get_nat_bits(&scr->pt, &scr->sw); +#endif err = __put_user(flags, &sc->sc_flags); err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); - err |= __put_user(pt->cr_ipsr & IA64_PSR_UM, &sc->sc_um); - err |= __put_user(pt->ar_rsc, &sc->sc_ar_rsc); - err |= __put_user(pt->ar_ccv, &sc->sc_ar_ccv); - err |= __put_user(pt->ar_unat, &sc->sc_ar_unat); /* ar.unat */ - err |= __put_user(pt->ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ - err |= __put_user(pt->ar_pfs, &sc->sc_ar_pfs); - err |= __put_user(pt->pr, &sc->sc_pr); /* predicates */ - err |= __put_user(pt->b0, &sc->sc_br[0]); /* b0 (rp) */ - err |= __put_user(pt->b6, &sc->sc_br[6]); /* b6 */ - err |= __put_user(pt->b7, &sc->sc_br[7]); /* b7 */ - - err |= __copy_to_user(&sc->sc_gr[1], &pt->r1, 3*8); /* r1-r3 */ - err |= __copy_to_user(&sc->sc_gr[8], &pt->r8, 4*8); /* r8-r11 */ - err |= __copy_to_user(&sc->sc_gr[12], &pt->r12, 4*8); /* r12-r15 */ - err |= __copy_to_user(&sc->sc_gr[16], &pt->r16, 16*8); /* r16-r31 */ + err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); + err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); + err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); + err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ + err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ + err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); + err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */ + err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ + err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ + err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ + + err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 3*8); /* r1-r3 */ + err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */ + err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 4*8); /* r12-r15 */ + err |= 
__copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */ - err |= __put_user(pt->cr_iip + ia64_psr(pt)->ri, &sc->sc_ip); - err |= __put_user(pt->r12, &sc->sc_gr[12]); /* r12 */ + err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); return err; } static long -setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *pt) +setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, + struct sigscratch *scr) { - struct switch_stack *sw = (struct switch_stack *) pt - 1; extern char ia64_sigtramp[], __start_gate_section[]; unsigned long tramp_addr, new_rbs = 0; struct sigframe *frame; struct siginfo si; long err; - frame = (void *) pt->r12; + frame = (void *) scr->pt.r12; tramp_addr = GATE_ADDR + (ia64_sigtramp - __start_gate_section); if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack((unsigned long) frame)) { new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1); @@ -331,31 +356,39 @@ err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp); err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size); - err |= __put_user(sas_ss_flags(pt->r12), &frame->sc.sc_stack.ss_flags); - err |= setup_sigcontext(&frame->sc, set, pt); + err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags); + err |= setup_sigcontext(&frame->sc, set, scr); if (err) goto give_sigsegv; - pt->r12 = (unsigned long) frame - 16; /* new stack pointer */ - pt->r2 = sig; /* signal number */ - pt->r3 = (unsigned long) ka->sa.sa_handler; /* addr. of handler's proc. descriptor */ - pt->r15 = new_rbs; - pt->ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ - pt->cr_iip = tramp_addr; - ia64_psr(pt)->ri = 0; /* start executing in first slot */ + scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ + scr->pt.r2 = sig; /* signal number */ + scr->pt.r3 = (unsigned long) ka->sa.sa_handler; /* addr. 
of handler's proc desc */ + scr->pt.r15 = new_rbs; + scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ + scr->pt.cr_iip = tramp_addr; + ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ +#ifdef CONFIG_IA64_NEW_UNWIND /* * Note: this affects only the NaT bits of the scratch regs - * (the ones saved in pt_regs, which is exactly what we want. + * (the ones saved in pt_regs), which is exactly what we want. + */ + scr->scratch_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */ +#else + /* + * Note: this affects only the NaT bits of the scratch regs + * (the ones saved in pt_regs), which is exactly what we want. * The NaT bits for the preserved regs (r4-r7) are in * sw->ar_unat iff this process is being PTRACED. */ - sw->caller_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */ + scr->sw.caller_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */ +#endif #if DEBUG_SIG printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%lx\n", - current->comm, current->pid, sig, pt->r12, pt->cr_iip, pt->r3); + current->comm, current->pid, sig, scr->pt.r12, scr->pt.cr_iip, scr->pt.r3); #endif return 1; @@ -374,17 +407,17 @@ static long handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, - struct pt_regs *pt) + struct sigscratch *scr) { #ifdef CONFIG_IA32_SUPPORT - if (IS_IA32_PROCESS(pt)) { + if (IS_IA32_PROCESS(&scr->pt)) { /* send signal to IA-32 process */ - if (!ia32_setup_frame1(sig, ka, info, oldset, pt)) + if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt)) return 0; } else #endif /* send signal to IA-64 process */ - if (!setup_frame(sig, ka, info, oldset, pt)) + if (!setup_frame(sig, ka, info, oldset, scr)) return 0; if (ka->sa.sa_flags & SA_ONESHOT) @@ -401,12 +434,6 @@ } /* - * When we get here, `pt' points to struct pt_regs and ((struct - * switch_stack *) pt - 1) points to a switch stack structure. 
- * HOWEVER, in the normal case, the ONLY value valid in the - * switch_stack is the caller_unat field. The entire switch_stack is - * valid ONLY if current->flags has PF_PTRACED set. - * * Note that `init' is a special process: it doesn't get signals it * doesn't want to handle. Thus you cannot kill init even with a * SIGKILL even by mistake. @@ -416,24 +443,35 @@ * user-level signal handling stack-frames in one go after that. */ long -ia64_do_signal (sigset_t *oldset, struct pt_regs *pt, long in_syscall) +ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) { struct k_sigaction *ka; siginfo_t info; long restart = in_syscall; + long errno = scr->pt.r8; /* * In the ia64_leave_kernel code path, we want the common case * to go fast, which is why we may in certain cases get here * from kernel mode. Just return without doing anything if so. */ - if (!user_mode(pt)) + if (!user_mode(&scr->pt)) return 0; if (!oldset) oldset = ¤t->blocked; - if (pt->r10 != -1) { +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(&scr->pt)) { + if (in_syscall) { + if (errno >= 0) + restart = 0; + else + errno = -errno; + } + } else +#endif + if (scr->pt.r10 != -1) { /* * A system calls has to be restarted only if one of * the error codes ERESTARTNOHAND, ERESTARTSYS, or @@ -527,7 +565,7 @@ case SIGQUIT: case SIGILL: case SIGTRAP: case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ: - if (do_coredump(signr, pt)) + if (do_coredump(signr, &scr->pt)) exit_code |= 0x80; /* FALLTHRU */ @@ -542,39 +580,54 @@ } if (restart) { - switch (pt->r8) { + switch (errno) { case ERESTARTSYS: if ((ka->sa.sa_flags & SA_RESTART) == 0) { case ERESTARTNOHAND: - pt->r8 = EINTR; - /* note: pt->r10 is already -1 */ +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(&scr->pt)) + scr->pt.r8 = -EINTR; + else +#endif + scr->pt.r8 = EINTR; + /* note: scr->pt.r10 is already -1 */ break; } case ERESTARTNOINTR: - ia64_decrement_ip(pt); +#ifdef CONFIG_IA32_SUPPORT 
+ if (IS_IA32_PROCESS(&scr->pt)) { + scr->pt.r8 = scr->pt.r1; + scr->pt.cr_iip -= 2; + } else +#endif + ia64_decrement_ip(&scr->pt); } } /* Whee! Actually deliver the signal. If the delivery failed, we need to continue to iterate in this loop so we can deliver the SIGSEGV... */ - if (handle_signal(signr, ka, &info, oldset, pt)) + if (handle_signal(signr, ka, &info, oldset, scr)) return 1; } /* Did we come from a system call? */ if (restart) { /* Restart the system call - no handlers present */ - if (pt->r8 == ERESTARTNOHAND || - pt->r8 == ERESTARTSYS || - pt->r8 == ERESTARTNOINTR) { + if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR) { +#ifdef CONFIG_IA32_SUPPORT + if (IS_IA32_PROCESS(&scr->pt)) { + scr->pt.r8 = scr->pt.r1; + scr->pt.cr_iip -= 2; + } else +#endif /* * Note: the syscall number is in r15 which is * saved in pt_regs so all we need to do here * is adjust ip so that the "break" * instruction gets re-executed. */ - ia64_decrement_ip(pt); + ia64_decrement_ip(&scr->pt); } } return 0; diff -urN linux-2.4.0-test1/arch/ia64/kernel/smp.c linux-2.4.0-test1-lia/arch/ia64/kernel/smp.c --- linux-2.4.0-test1/arch/ia64/kernel/smp.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/smp.c Fri Jun 9 17:49:36 2000 @@ -21,11 +21,13 @@ #include #include #include +#include #include #include #include #include + #include #include #include @@ -39,6 +41,7 @@ extern int cpu_idle(void * unused); extern void _start(void); +extern void machine_halt(void); extern int cpu_now_booting; /* Used by head.S to find idle task */ extern volatile unsigned long cpu_online_map; /* Bitmap of available cpu's */ @@ -66,15 +69,18 @@ atomic_t unstarted_count; atomic_t unfinished_count; }; -static struct smp_call_struct *smp_call_function_data; +static volatile struct smp_call_struct *smp_call_function_data; -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#ifdef CONFIG_ITANIUM_A1_SPECIFIC extern spinlock_t ivr_read_lock; #endif #define IPI_RESCHEDULE 0 #define 
IPI_CALL_FUNC 1 #define IPI_CPU_STOP 2 +#ifndef CONFIG_ITANIUM_PTCG +# define IPI_FLUSH_TLB 3 +#endif /*!CONFIG_ITANIUM_PTCG */ /* * Setup routine for controlling SMP activation @@ -126,6 +132,22 @@ } +static inline int +pointer_lock(void *lock, void *data, int retry) +{ + again: + if (cmpxchg_acq((void **) lock, 0, data) == 0) + return 0; + + if (!retry) + return -EBUSY; + + while (*(void **) lock) + ; + + goto again; +} + void handle_IPI(int irq, void *dev_id, struct pt_regs *regs) { @@ -160,13 +182,14 @@ void *info; int wait; + /* release the 'pointer lock' */ data = smp_call_function_data; func = data->func; info = data->info; wait = data->wait; mb(); - atomic_dec (&data->unstarted_count); + atomic_dec(&data->unstarted_count); /* At this point the structure may be gone unless wait is true. */ (*func)(info); @@ -174,7 +197,7 @@ /* Notify the sending CPU that the task is done. */ mb(); if (wait) - atomic_dec (&data->unfinished_count); + atomic_dec(&data->unfinished_count); } break; @@ -182,6 +205,51 @@ halt_processor(); break; +#ifndef CONFIG_ITANIUM_PTCG + case IPI_FLUSH_TLB: + { + extern unsigned long flush_start, flush_end, flush_nbits, flush_rid; + extern atomic_t flush_cpu_count; + unsigned long saved_rid = ia64_get_rr(flush_start); + unsigned long end = flush_end; + unsigned long start = flush_start; + unsigned long nbits = flush_nbits; + + /* + * Current CPU may be running with different + * RID so we need to reload the RID of flushed + * address. Purging the translation also + * needs ALAT invalidation; we do not need + * "invala" here since it is done in + * ia64_leave_kernel. + */ + ia64_srlz_d(); + if (saved_rid != flush_rid) { + ia64_set_rr(flush_start, flush_rid); + ia64_srlz_d(); + } + + do { + /* + * Purge local TLB entries. 
+ */ + __asm__ __volatile__ ("ptc.l %0,%1" :: + "r"(start), "r"(nbits<<2) : "memory"); + start += (1UL << nbits); + } while (start < end); + + ia64_insn_group_barrier(); + ia64_srlz_i(); /* srlz.i implies srlz.d */ + + if (saved_rid != flush_rid) { + ia64_set_rr(flush_start, saved_rid); + ia64_srlz_d(); + } + atomic_dec(&flush_cpu_count); + break; + } +#endif /* !CONFIG_ITANIUM_PTCG */ + default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; @@ -199,7 +267,7 @@ if (dest_cpu == -1) return; - ipi_op[dest_cpu] |= (1 << op); + set_bit(op, &ipi_op[dest_cpu]); ipi_send(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0); } @@ -243,6 +311,14 @@ send_IPI_allbutself(IPI_CPU_STOP); } +#ifndef CONFIG_ITANIUM_PTCG +void +smp_send_flush_tlb(void) +{ + send_IPI_allbutself(IPI_FLUSH_TLB); +} +#endif /* !CONFIG_ITANIUM_PTCG */ + /* * Run a function on all other CPUs. * The function to run. This must be fast and non-blocking. @@ -260,63 +336,35 @@ { struct smp_call_struct data; long timeout; - static spinlock_t lock = SPIN_LOCK_UNLOCKED; + int cpus = smp_num_cpus - 1; + + if (cpus == 0) + return 0; data.func = func; data.info = info; data.wait = wait; - atomic_set(&data.unstarted_count, smp_num_cpus - 1); - atomic_set(&data.unfinished_count, smp_num_cpus - 1); + atomic_set(&data.unstarted_count, cpus); + atomic_set(&data.unfinished_count, cpus); - if (retry) { - while (1) { - if (smp_call_function_data) { - schedule (); /* Give a mate a go */ - continue; - } - spin_lock (&lock); - if (smp_call_function_data) { - spin_unlock (&lock); /* Bad luck */ - continue; - } - /* Mine, all mine! 
*/ - break; - } - } - else { - if (smp_call_function_data) - return -EBUSY; - spin_lock (&lock); - if (smp_call_function_data) { - spin_unlock (&lock); - return -EBUSY; - } - } + if (pointer_lock(&smp_call_function_data, &data, retry)) + return -EBUSY; - smp_call_function_data = &data; - spin_unlock (&lock); - data.func = func; - data.info = info; - atomic_set (&data.unstarted_count, smp_num_cpus - 1); - data.wait = wait; - if (wait) - atomic_set (&data.unfinished_count, smp_num_cpus - 1); - /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(IPI_CALL_FUNC); /* Wait for response */ timeout = jiffies + HZ; - while ( (atomic_read (&data.unstarted_count) > 0) && - time_before (jiffies, timeout) ) - barrier (); - if (atomic_read (&data.unstarted_count) > 0) { + while ((atomic_read(&data.unstarted_count) > 0) && time_before(jiffies, timeout)) + barrier(); + if (atomic_read(&data.unstarted_count) > 0) { smp_call_function_data = NULL; return -ETIMEDOUT; } if (wait) - while (atomic_read (&data.unfinished_count) > 0) - barrier (); + while (atomic_read(&data.unfinished_count) > 0) + barrier(); + /* unlock pointer */ smp_call_function_data = NULL; return 0; } @@ -382,17 +430,21 @@ } } - -/* - * Called by both boot and secondaries to move global data into - * per-processor storage. 
- */ static inline void __init -smp_store_cpu_info(int cpuid) +smp_calibrate_delay(int cpuid) { struct cpuinfo_ia64 *c = &cpu_data[cpuid]; - - identify_cpu(c); +#if 0 + unsigned long old = loops_per_sec; + extern void calibrate_delay(void); + + loops_per_sec = 0; + calibrate_delay(); + c->loops_per_sec = loops_per_sec; + loops_per_sec = old; +#else + c->loops_per_sec = loops_per_sec; +#endif } /* @@ -446,34 +498,26 @@ extern void ia64_init_itm(void); extern void ia64_cpu_local_tick(void); - ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP); - ia64_set_fpu_owner(0); - ia64_rid_init(); /* initialize region ids */ - cpu_init(); - __flush_tlb_all(); - smp_store_cpu_info(smp_processor_id()); smp_setup_percpu_timer(smp_processor_id()); - if (test_and_set_bit(smp_processor_id(), &cpu_online_map)) { - printk("CPU#%d already initialized!\n", smp_processor_id()); - machine_halt(); - } - while (!smp_threads_ready) - mb(); - - normal_xtp(); - /* setup the CPU local timer tick */ - ia64_cpu_local_tick(); + ia64_init_itm(); /* Disable all local interrupts */ ia64_set_lrr0(0, 1); ia64_set_lrr1(0, 1); - __sti(); /* Interrupts have been off till now. 
*/ + if (test_and_set_bit(smp_processor_id(), &cpu_online_map)) { + printk("CPU#%d already initialized!\n", smp_processor_id()); + machine_halt(); + } + while (!smp_threads_ready) + mb(); + local_irq_enable(); /* Interrupts have been off until now */ + smp_calibrate_delay(smp_processor_id()); printk("SMP: CPU %d starting idle loop\n", smp_processor_id()); cpu_idle(NULL); @@ -583,16 +627,8 @@ /* Setup BSP mappings */ __cpu_number_map[bootstrap_processor] = 0; __cpu_logical_map[0] = bootstrap_processor; - current->processor = bootstrap_processor; - - /* Mark BSP booted and get active_mm context */ - cpu_init(); - - /* reset XTP for interrupt routing */ - normal_xtp(); - /* And generate an entry in cpu_data */ - smp_store_cpu_info(bootstrap_processor); + smp_calibrate_delay(smp_processor_id()); #if 0 smp_tune_scheduling(); #endif diff -urN linux-2.4.0-test1/arch/ia64/kernel/sys_ia64.c linux-2.4.0-test1-lia/arch/ia64/kernel/sys_ia64.c --- linux-2.4.0-test1/arch/ia64/kernel/sys_ia64.c Sun Feb 13 10:30:38 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/sys_ia64.c Fri Jun 9 17:18:00 2000 @@ -156,6 +156,9 @@ { struct pt_regs *regs = (struct pt_regs *) &stack; + if ((off & ~PAGE_MASK) != 0) + return -EINVAL; + addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR(addr)) regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */ @@ -196,6 +199,20 @@ return -ENOSYS; } +asmlinkage unsigned long +ia64_create_module (const char *name_user, size_t size, long arg2, long arg3, + long arg4, long arg5, long arg6, long arg7, long stack) +{ + extern unsigned long sys_create_module (const char *, size_t); + struct pt_regs *regs = (struct pt_regs *) &stack; + unsigned long addr; + + addr = sys_create_module (name_user, size); + if (!IS_ERR(addr)) + regs->r8 = 0; /* ensure large addresses are not mistaken as failures... 
*/ + return addr; +} + #ifndef CONFIG_PCI asmlinkage long @@ -211,6 +228,5 @@ { return -ENOSYS; } - #endif /* CONFIG_PCI */ diff -urN linux-2.4.0-test1/arch/ia64/kernel/time.c linux-2.4.0-test1-lia/arch/ia64/kernel/time.c --- linux-2.4.0-test1/arch/ia64/kernel/time.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/time.c Fri Jun 9 17:18:07 2000 @@ -34,7 +34,10 @@ static struct { unsigned long delta; - unsigned long next[NR_CPUS]; + union { + unsigned long count; + unsigned char pad[SMP_CACHE_BYTES]; + } next[NR_CPUS]; } itm; static void @@ -69,16 +72,27 @@ static inline unsigned long gettimeoffset (void) { - unsigned long now = ia64_get_itc(); - unsigned long elapsed_cycles, lost; - - elapsed_cycles = now - (itm.next[smp_processor_id()] - itm.delta); - - lost = lost_ticks; - if (lost) - elapsed_cycles += lost*itm.delta; +#ifdef CONFIG_SMP + /* + * The code below doesn't work for SMP because only CPU 0 + * keeps track of the time. + */ + return 0; +#else + unsigned long now = ia64_get_itc(), last_tick; + unsigned long elapsed_cycles, lost = lost_ticks; + last_tick = (itm.next[smp_processor_id()].count - (lost+1)*itm.delta); +# if 1 + if ((long) (now - last_tick) < 0) { + printk("Yikes: now < last_tick (now=0x%lx,last_tick=%lx)! No can do.\n", + now, last_tick); + return 0; + } +# endif + elapsed_cycles = now - last_tick; return (elapsed_cycles*my_cpu_data.usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT; +#endif } void @@ -137,6 +151,7 @@ static unsigned long last_time; static unsigned char count; int cpu = smp_processor_id(); + unsigned long new_itm; int printed = 0; /* @@ -146,6 +161,12 @@ * xtime_lock. */ write_lock(&xtime_lock); + new_itm = itm.next[cpu].count; + + if (!time_after(ia64_get_itc(), new_itm)) + printk("Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", + ia64_get_itc(), new_itm); + while (1) { /* * Do kernel PC profiling here. 
We multiply the @@ -164,18 +185,10 @@ do_timer(regs); #endif - itm.next[cpu] += itm.delta; - /* - * There is a race condition here: to be on the "safe" - * side, we process timer ticks until itm.next is - * ahead of the itc by at least half the timer - * interval. This should give us enough time to set - * the new itm value without losing a timer tick. - */ - if (time_after(itm.next[cpu], ia64_get_itc() + itm.delta/2)) { - ia64_set_itm(itm.next[cpu]); + new_itm += itm.delta; + itm.next[cpu].count = new_itm; + if (time_after(new_itm, ia64_get_itc())) break; - } #if !(defined(CONFIG_IA64_SOFTSDV_HACKS) && defined(CONFIG_SMP)) /* @@ -188,28 +201,39 @@ last_time = jiffies; if (!printed) { printk("Lost clock tick on CPU %d (now=%lx, next=%lx)!!\n", - cpu, ia64_get_itc(), itm.next[cpu]); + cpu, ia64_get_itc(), itm.next[cpu].count); printed = 1; - } # ifdef CONFIG_IA64_DEBUG_IRQ - printk("last_cli_ip=%lx\n", last_cli_ip); + printk("last_cli_ip=%lx\n", last_cli_ip); # endif + } } #endif } write_unlock(&xtime_lock); + + /* + * If we're too close to the next clock tick for comfort, we + * increase the saftey margin by intentionally dropping the + * next tick(s). We do NOT update itm.next accordingly + * because that would force us to call do_timer() which in + * turn would let our clock run too fast (with the potentially + * devastating effect of losing monotony of time). + */ + while (!time_after(new_itm, ia64_get_itc() + itm.delta/2)) + new_itm += itm.delta; + ia64_set_itm(new_itm); } -#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC +#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS) -void +/* + * Interrupts must be disabled before calling this routine. 
+ */ +void ia64_reset_itm (void) { - unsigned long flags; - - local_irq_save(flags); timer_interrupt(0, 0, ia64_task_regs(current)); - local_irq_restore(flags); } #endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */ @@ -220,11 +244,14 @@ void __init ia64_cpu_local_tick(void) { +#ifdef CONFIG_IA64_SOFTSDV_HACKS + ia64_set_itc(0); +#endif + /* arrange for the cycle counter to generate a timer interrupt: */ ia64_set_itv(TIMER_IRQ, 0); - ia64_set_itc(0); - itm.next[smp_processor_id()] = ia64_get_itc() + itm.delta; - ia64_set_itm(itm.next[smp_processor_id()]); + itm.next[smp_processor_id()].count = ia64_get_itc() + itm.delta; + ia64_set_itm(itm.next[smp_processor_id()].count); } void __init @@ -254,25 +281,7 @@ itc_ratio.num = 3; itc_ratio.den = 1; } -#if defined(CONFIG_IA64_LION_HACKS) - /* Our Lion currently returns base freq 104.857MHz, which - ain't right (it really is 100MHz). */ - printk("SAL/PAL returned: base-freq=%lu, itc-ratio=%lu/%lu, proc-ratio=%lu/%lu\n", - platform_base_freq, itc_ratio.num, itc_ratio.den, - proc_ratio.num, proc_ratio.den); - platform_base_freq = 100000000; -#elif 0 && defined(CONFIG_IA64_BIGSUR_HACKS) - /* BigSur with 991020 firmware returned itc-ratio=9/2 and base - freq 75MHz, which wasn't right. The 991119 firmware seems - to return the right values, so this isn't necessary - anymore... 
*/ - printk("SAL/PAL returned: base-freq=%lu, itc-ratio=%lu/%lu, proc-ratio=%lu/%lu\n", - platform_base_freq, itc_ratio.num, itc_ratio.den, - proc_ratio.num, proc_ratio.den); - platform_base_freq = 100000000; - proc_ratio.num = 5; proc_ratio.den = 1; - itc_ratio.num = 5; itc_ratio.den = 1; -#elif defined(CONFIG_IA64_SOFTSDV_HACKS) +#ifdef CONFIG_IA64_SOFTSDV_HACKS platform_base_freq = 10000000; proc_ratio.num = 4; proc_ratio.den = 1; itc_ratio.num = 4; itc_ratio.den = 1; @@ -290,8 +299,9 @@ itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den; itm.delta = itc_freq / HZ; - printk("timer: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, ITC freq=%lu.%03luMHz\n", - platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, + printk("timer: CPU %d base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, ITC freq=%lu.%03luMHz\n", + smp_processor_id(), + platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000); my_cpu_data.proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; @@ -313,6 +323,8 @@ time_init (void) { /* we can't do request_irq() here because the kmalloc() would fail... */ + irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU; + irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic; setup_irq(TIMER_IRQ, &timer_irqaction); efi_gettimeofday(&xtime); diff -urN linux-2.4.0-test1/arch/ia64/kernel/traps.c linux-2.4.0-test1-lia/arch/ia64/kernel/traps.c --- linux-2.4.0-test1/arch/ia64/kernel/traps.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/traps.c Fri Jun 9 17:49:52 2000 @@ -3,8 +3,12 @@ * * Copyright (C) 1998-2000 Hewlett-Packard Co * Copyright (C) 1998-2000 David Mosberger-Tang + * + * 05/12/00 grao : added isr in siginfo for SIGFPE */ +#define FPSWA_DEBUG 1 + /* * The fpu_fault() handler needs to be able to access and update all * floating point registers. 
Those saved in pt_regs can be accessed @@ -300,6 +304,7 @@ if (copy_from_user(bundle, (void *) fault_ip, sizeof(bundle))) return -1; +#ifdef FPSWA_DEBUG if (fpu_swa_count > 5 && jiffies - last_time > 5*HZ) fpu_swa_count = 0; if (++fpu_swa_count < 5) { @@ -307,7 +312,7 @@ printk("%s(%d): floating-point assist fault at ip %016lx\n", current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri); } - +#endif exception = fp_emulate(fp_fault, bundle, ®s->cr_ipsr, ®s->ar_fpsr, &isr, ®s->pr, ®s->cr_ifs, regs); if (fp_fault) { @@ -331,6 +336,7 @@ } else if (isr & 0x44) { siginfo.si_code = FPE_FLTDIV; } + siginfo.si_isr = isr; send_sig_info(SIGFPE, &siginfo, current); } } else { @@ -350,12 +356,49 @@ } else if (isr & 0x2200) { siginfo.si_code = FPE_FLTRES; } + siginfo.si_isr = isr; send_sig_info(SIGFPE, &siginfo, current); } } return 0; } +struct illegal_op_return { + unsigned long fkt, arg1, arg2, arg3; +}; + +struct illegal_op_return +ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, unsigned long arg5, + unsigned long arg6, unsigned long arg7, unsigned long stack) +{ + struct pt_regs *regs = (struct pt_regs *) &stack; + struct illegal_op_return rv; + struct siginfo si; + char buf[128]; + +#ifdef CONFIG_IA64_BRL_EMU + { + extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long); + + rv = ia64_emulate_brl(regs, ec); + if (rv.fkt != (unsigned long) -1) + return rv; + } +#endif + + sprintf(buf, "IA-64 Illegal operation fault"); + die_if_kernel(buf, regs, 0); + + memset(&si, 0, sizeof(si)); + si.si_signo = SIGILL; + si.si_code = ILL_ILLOPC; + si.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri); + force_sig_info(SIGILL, &si, current); + rv.fkt = 0; + return rv; +} + void ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, unsigned long iim, unsigned long itir, unsigned long arg5, @@ -449,11 +492,6 @@ siginfo.si_errno = 0; force_sig_info(SIGTRAP, 
&siginfo, current); return; - - case 30: /* Unaligned fault */ - sprintf(buf, "Kernel unaligned trap accessing %016lx (ip=%016lx)!", - ifa, regs->cr_iip + ia64_psr(regs)->ri); - break; case 32: /* fp fault */ case 33: /* fp trap */ diff -urN linux-2.4.0-test1/arch/ia64/kernel/unaligned.c linux-2.4.0-test1-lia/arch/ia64/kernel/unaligned.c --- linux-2.4.0-test1/arch/ia64/kernel/unaligned.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/unaligned.c Thu Jun 1 01:07:40 2000 @@ -1,8 +1,8 @@ /* * Architecture-specific unaligned trap handling. * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 Stephane Eranian */ #include #include @@ -1410,6 +1410,25 @@ die_if_kernel("Unaligned reference while in kernel\n", regs, 30); /* NOT_REACHED */ } + /* + * For now, we don't support user processes running big-endian + * which do unaligned accesses + */ + if (ia64_psr(regs)->be) { + struct siginfo si; + + printk(KERN_ERR "%s(%d): big-endian unaligned access %016lx (ip=%016lx) not " + "yet supported\n", + current->comm, current->pid, ifa, regs->cr_iip + ipsr->ri); + + si.si_signo = SIGBUS; + si.si_errno = 0; + si.si_code = BUS_ADRALN; + si.si_addr = (void *) ifa; + send_sig_info(SIGBUS, &si, current); + return; + } + if (current->thread.flags & IA64_THREAD_UAC_SIGBUS) { struct siginfo si; @@ -1417,7 +1436,7 @@ si.si_errno = 0; si.si_code = BUS_ADRALN; si.si_addr = (void *) ifa; - send_sig_info (SIGBUS, &si, current); + send_sig_info(SIGBUS, &si, current); return; } diff -urN linux-2.4.0-test1/arch/ia64/kernel/unwind.c linux-2.4.0-test1-lia/arch/ia64/kernel/unwind.c --- linux-2.4.0-test1/arch/ia64/kernel/unwind.c Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/unwind.c Fri Jun 9 17:18:53 2000 @@ -1,16 +1,1709 @@ /* - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 David Mosberger-Tang + * Copyright (C) 1999-2000 
Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang */ +/* + * SMP conventions: + * o updates to the global unwind data (in structure "unw") are serialized + * by the unw.lock spinlock + * o each unwind script has its own read-write lock; a thread must acquire + * a read lock before executing a script and must acquire a write lock + * before modifying a script + * o if both the unw.lock spinlock and a script's read-write lock must be + * acquired, then the read-write lock must be acquired first. + */ +#include #include #include +#include #include +#ifdef CONFIG_IA64_NEW_UNWIND + +#include +#include +#include +#include +#include + +#include "entry.h" +#include "unwind_i.h" + +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define p5 5 + +/* + * The unwind tables are supposed to be sorted, but the GNU toolchain + * currently fails to produce a sorted table in the presence of + * functions that go into sections other than .text. For example, the + * kernel likes to put initialization code into .text.init, which + * messes up the sort order. Hopefully, this will get fixed sometime + * soon. --davidm 00/05/23 + */ +#define UNWIND_TABLE_SORT_BUG + +#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */ +#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE) + +#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1) +#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE) + +#define UNW_DEBUG 1 +#define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */ + +#if UNW_DEBUG +# define dprintk(format...) printk(format) +# define inline +#else +# define dprintk(format...) +#endif + +#if UNW_STATS +# define STAT(x...) x +#else +# define STAT(x...) 
+#endif + +#define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC) +#define free_reg_state(usr) kfree(usr) + +typedef unsigned long unw_word; +typedef unsigned char unw_hash_index_t; + +#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0) + +static struct { + struct unw_table *tables; + + const unsigned char save_order[8]; + /* Maps a preserved register index (preg_index) to corresponding switch_stack offset: */ + unsigned short sw_off[sizeof(struct unw_frame_info) / 8]; + + unsigned short lru_head; + unsigned short lru_tail; + unsigned short preg_index[UNW_NUM_REGS]; + struct unw_table kernel_table; + + spinlock_t lock; + unw_hash_index_t hash[UNW_HASH_SIZE]; + struct unw_script cache[UNW_CACHE_SIZE]; + +# if UNW_DEBUG + const char *preg_name[UNW_NUM_REGS]; +# endif +# if UNW_STATS + struct { + struct { + int lookups; + int hinted_hits; + int normal_hits; + int collision_chain_traversals; + } cache; + struct { + unsigned long build_time; + unsigned long run_time; + unsigned long parse_time; + int builds; + int news; + int collisions; + int runs; + } script; + struct { + unsigned long init_time; + unsigned long unwind_time; + int inits; + int unwinds; + } api; + } stat; +# endif +} unw = { + tables: &unw.kernel_table, + lock: SPIN_LOCK_UNLOCKED, + save_order: { + UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, + UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR + }, + preg_index: { + struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_GR */ + struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_MEM */ + struct_offset(struct unw_frame_info, pbsp)/8, + struct_offset(struct unw_frame_info, bspstore)/8, + struct_offset(struct unw_frame_info, pfs)/8, + struct_offset(struct unw_frame_info, rnat)/8, + struct_offset(struct unw_frame_info, psp)/8, + struct_offset(struct unw_frame_info, rp)/8, + struct_offset(struct unw_frame_info, r4)/8, + struct_offset(struct unw_frame_info, r5)/8, + 
struct_offset(struct unw_frame_info, r6)/8, + struct_offset(struct unw_frame_info, r7)/8, + struct_offset(struct unw_frame_info, unat)/8, + struct_offset(struct unw_frame_info, pr)/8, + struct_offset(struct unw_frame_info, lc)/8, + struct_offset(struct unw_frame_info, fpsr)/8, + struct_offset(struct unw_frame_info, b1)/8, + struct_offset(struct unw_frame_info, b2)/8, + struct_offset(struct unw_frame_info, b3)/8, + struct_offset(struct unw_frame_info, b4)/8, + struct_offset(struct unw_frame_info, b5)/8, + struct_offset(struct unw_frame_info, f2)/8, + struct_offset(struct unw_frame_info, f3)/8, + struct_offset(struct unw_frame_info, f4)/8, + struct_offset(struct unw_frame_info, f5)/8, + struct_offset(struct unw_frame_info, fr[16 - 16])/8, + struct_offset(struct unw_frame_info, fr[17 - 16])/8, + struct_offset(struct unw_frame_info, fr[18 - 16])/8, + struct_offset(struct unw_frame_info, fr[19 - 16])/8, + struct_offset(struct unw_frame_info, fr[20 - 16])/8, + struct_offset(struct unw_frame_info, fr[21 - 16])/8, + struct_offset(struct unw_frame_info, fr[22 - 16])/8, + struct_offset(struct unw_frame_info, fr[23 - 16])/8, + struct_offset(struct unw_frame_info, fr[24 - 16])/8, + struct_offset(struct unw_frame_info, fr[25 - 16])/8, + struct_offset(struct unw_frame_info, fr[26 - 16])/8, + struct_offset(struct unw_frame_info, fr[27 - 16])/8, + struct_offset(struct unw_frame_info, fr[28 - 16])/8, + struct_offset(struct unw_frame_info, fr[29 - 16])/8, + struct_offset(struct unw_frame_info, fr[30 - 16])/8, + struct_offset(struct unw_frame_info, fr[31 - 16])/8, + }, + hash : { [0 ... 
UNW_HASH_SIZE - 1] = -1 }, +#if UNW_DEBUG + preg_name: { + "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp", + "r4", "r5", "r6", "r7", + "ar.unat", "pr", "ar.lc", "ar.fpsr", + "b1", "b2", "b3", "b4", "b5", + "f2", "f3", "f4", "f5", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" + } +#endif +}; + + +/* Unwind accessors. */ + +int +unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) +{ + unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat; + struct unw_ireg *ireg; + struct pt_regs *pt; + + if ((unsigned) regnum - 1 >= 127) { + dprintk("unwind: trying to access non-existent r%u\n", regnum); + return -1; + } + + if (regnum < 32) { + if (regnum >= 4 && regnum <= 7) { + /* access a preserved register */ + ireg = &info->r4 + (regnum - 4); + addr = ireg->loc; + if (addr) { + nat_addr = addr + ireg->nat.off; + switch (ireg->nat.type) { + case UNW_NAT_VAL: + /* simulate getf.sig/setf.sig */ + if (write) { + if (*nat) { + /* write NaTVal and be done with it */ + addr[0] = 0; + addr[1] = 0x1fffe; + return 0; + } + addr[1] = 0x1003e; + } else { + if (addr[0] == 0 && addr[1] == 0x1ffe) { + /* return NaT and be done with it */ + *val = 0; + *nat = 1; + return 0; + } + } + /* fall through */ + case UNW_NAT_NONE: + nat_addr = &dummy_nat; + break; + + case UNW_NAT_SCRATCH: + if (info->unat) + nat_addr = info->unat; + else + nat_addr = &info->sw->caller_unat; + case UNW_NAT_PRI_UNAT: + nat_mask = (1UL << ((long) addr & 0x1f8)/8); + break; + + case UNW_NAT_STACKED: + nat_addr = ia64_rse_rnat_addr(addr); + if ((unsigned long) addr < info->regstk.limit + || (unsigned long) addr >= info->regstk.top) + { + dprintk("unwind: %lx outside of regstk " + "[0x%lx-0x%lx)\n", addr, + info->regstk.limit, info->regstk.top); + return -1; + } + if ((unsigned long) nat_addr >= info->regstk.top) + nat_addr = &info->sw->ar_rnat; + nat_mask = (1UL << 
ia64_rse_slot_num(addr)); + break; + } + } else { + addr = &info->sw->r4 + (regnum - 4); + nat_addr = &info->sw->ar_unat; + nat_mask = (1UL << ((long) addr & 0x1f8)/8); + } + } else { + /* access a scratch register */ + pt = (struct pt_regs *) info->sp - 1; + if (regnum <= 3) + addr = &pt->r1 + (regnum - 1); + else if (regnum <= 11) + addr = &pt->r8 + (regnum - 8); + else if (regnum <= 15) + addr = &pt->r12 + (regnum - 12); + else + addr = &pt->r16 + (regnum - 16); + if (info->unat) + nat_addr = info->unat; + else + nat_addr = &info->sw->caller_unat; + nat_mask = (1UL << ((long) addr & 0x1f8)/8); + } + } else { + /* access a stacked register */ + addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum); + nat_addr = ia64_rse_rnat_addr(addr); + if ((unsigned long) addr < info->regstk.limit + || (unsigned long) addr >= info->regstk.top) + { + dprintk("unwind: ignoring attempt to access register outside of rbs\n"); + return -1; + } + if ((unsigned long) nat_addr >= info->regstk.top) + nat_addr = &info->sw->ar_rnat; + nat_mask = (1UL << ia64_rse_slot_num(addr)); + } + + if (write) { + *addr = *val; + *nat_addr = (*nat_addr & ~nat_mask) | nat_mask; + } else { + *val = *addr; + *nat = (*nat_addr & nat_mask) != 0; + } + return 0; +} + +int +unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write) +{ + unsigned long *addr; + struct pt_regs *pt; + + pt = (struct pt_regs *) info->sp - 1; + switch (regnum) { + /* scratch: */ + case 0: addr = &pt->b0; break; + case 6: addr = &pt->b6; break; + case 7: addr = &pt->b7; break; + + /* preserved: */ + case 1: case 2: case 3: case 4: case 5: + addr = *(&info->b1 + (regnum - 1)); + if (!addr) + addr = &info->sw->b1 + (regnum - 1); + break; + + default: + dprintk("unwind: trying to access non-existent b%u\n", regnum); + return -1; + } + if (write) + *addr = *val; + else + *val = *addr; + return 0; +} + +int +unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) +{ 
+ struct ia64_fpreg *addr = 0; + struct pt_regs *pt; + + if ((unsigned) (regnum - 2) >= 30) { + dprintk("unwind: trying to access non-existent f%u\n", regnum); + return -1; + } + + pt = (struct pt_regs *) info->sp - 1; + + if (regnum <= 5) { + addr = *(&info->f2 + (regnum - 2)); + if (!addr) + addr = &info->sw->f2 + (regnum - 2); + } else if (regnum <= 15) { + if (regnum <= 9) + addr = &pt->f6 + (regnum - 6); + else + addr = &info->sw->f10 + (regnum - 10); + } else if (regnum <= 31) { + addr = *(&info->fr[regnum - 16]); + if (!addr) + addr = &info->sw->f16 + (regnum - 16); + } + + if (write) + *addr = *val; + else + *val = *addr; + return 0; +} + +int +unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write) +{ + unsigned long *addr; + struct pt_regs *pt; + + pt = (struct pt_regs *) info->sp - 1; + + switch (regnum) { + case UNW_AR_BSP: + addr = info->pbsp; + if (!addr) + addr = &info->sw->ar_bspstore; + break; + + case UNW_AR_BSPSTORE: + addr = info->bspstore; + if (!addr) + addr = &info->sw->ar_bspstore; + break; + + case UNW_AR_PFS: + addr = info->pfs; + if (!addr) + addr = &info->sw->ar_pfs; + break; + + case UNW_AR_RNAT: + addr = info->rnat; + if (!addr) + addr = &info->sw->ar_rnat; + break; + + case UNW_AR_UNAT: + addr = info->unat; + if (!addr) + addr = &info->sw->ar_unat; + break; + + case UNW_AR_LC: + addr = info->lc; + if (!addr) + addr = &info->sw->ar_lc; + break; + + case UNW_AR_FPSR: + addr = info->fpsr; + if (!addr) + addr = &info->sw->ar_fpsr; + break; + + case UNW_AR_RSC: + addr = &pt->ar_rsc; + break; + + case UNW_AR_CCV: + addr = &pt->ar_ccv; + break; + + default: + dprintk("unwind: trying to access non-existent ar%u\n", regnum); + return -1; + } + + if (write) + *addr = *val; + else + *val = *addr; + return 0; +} + +inline int +unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write) +{ + unsigned long *addr; + + addr = info->pr; + if (!addr) + addr = &info->sw->pr; + + if (write) + *addr = *val; + 
else + *val = *addr; + return 0; +} + + +/* Unwind decoder routines */ + +static inline void +push (struct unw_state_record *sr) +{ + struct unw_reg_state *rs; + + rs = alloc_reg_state(); + memcpy(rs, &sr->curr, sizeof(*rs)); + rs->next = sr->stack; + sr->stack = rs; +} + +static void +pop (struct unw_state_record *sr) +{ + struct unw_reg_state *rs; + + if (!sr->stack) { + printk ("unwind: stack underflow!\n"); + return; + } + rs = sr->stack; + sr->stack = rs->next; + free_reg_state(rs); +} + +static enum unw_register_index __attribute__((const)) +decode_abreg (unsigned char abreg, int memory) +{ + switch (abreg) { + case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04); + case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22); + case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30); + case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41); + case 0x60: return UNW_REG_PR; + case 0x61: return UNW_REG_PSP; + case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR; + case 0x63: return UNW_REG_RP; + case 0x64: return UNW_REG_BSP; + case 0x65: return UNW_REG_BSPSTORE; + case 0x66: return UNW_REG_RNAT; + case 0x67: return UNW_REG_UNAT; + case 0x68: return UNW_REG_FPSR; + case 0x69: return UNW_REG_PFS; + case 0x6a: return UNW_REG_LC; + default: + break; + } + dprintk("unwind: bad abreg=0x%x\n", abreg); + return UNW_REG_LC; +} + +static void +set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val) +{ + reg->val = val; + reg->where = where; + if (reg->when == UNW_WHEN_NEVER) + reg->when = when; +} + +static void +alloc_spill_area (unsigned long *offp, unsigned long regsize, + struct unw_reg_info *lo, struct unw_reg_info *hi) +{ + struct unw_reg_info *reg; + + for (reg = hi; reg >= lo; --reg) { + if (reg->where == UNW_WHERE_SPILL_HOME) { + reg->where = UNW_WHERE_PSPREL; + reg->val = *offp; + *offp += regsize; + } + } +} + +static inline void +spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t) +{ + 
struct unw_reg_info *reg; + + for (reg = *regp; reg <= lim; ++reg) { + if (reg->where == UNW_WHERE_SPILL_HOME) { + reg->when = t; + *regp = reg + 1; + return; + } + } + dprintk("unwind: excess spill!\n"); +} + +static inline void +finish_prologue (struct unw_state_record *sr) +{ + struct unw_reg_info *reg; + unsigned long off; + int i; + + /* + * First, resolve implicit register save locations + * (see Section "11.4.2.3 Rules for Using Unwind + * Descriptors", rule 3): + */ + for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) { + reg = sr->curr.reg + unw.save_order[i]; + if (reg->where == UNW_WHERE_GR_SAVE) { + reg->where = UNW_WHERE_GR; + reg->val = sr->gr_save_loc++; + } + } + + /* + * Next, compute when the fp, general, and branch registers get + * saved. This must come before alloc_spill_area() because + * we need to know which registers are spilled to their home + * locations. + */ + if (sr->imask) { + unsigned char kind, mask = 0, *cp = sr->imask; + unsigned long t; + static const unsigned char limit[3] = { + UNW_REG_F31, UNW_REG_R7, UNW_REG_B5 + }; + struct unw_reg_info *(regs[3]); + + regs[0] = sr->curr.reg + UNW_REG_F2; + regs[1] = sr->curr.reg + UNW_REG_R4; + regs[2] = sr->curr.reg + UNW_REG_B1; + + for (t = 0; t < sr->region_len; ++t) { + if ((t & 3) == 0) + mask = *cp++; + kind = (mask >> 2*(3-(t & 3))) & 3; + if (kind > 0) + spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1], + sr->region_start + t); + } + } + /* + * Next, lay out the memory stack spill area: + */ + if (sr->any_spills) { + off = sr->spill_offset; + alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31); + alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5); + alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7); + } +} + +/* + * Region header descriptors. 
+ */ + +static void +desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, + struct unw_state_record *sr) +{ + int i; + + if (!(sr->in_body || sr->first_region)) + finish_prologue(sr); + sr->first_region = 0; + + /* check if we're done: */ + if (body && sr->when_target < sr->region_start + sr->region_len) { + sr->done = 1; + return; + } + + for (i = 0; i < sr->epilogue_count; ++i) + pop(sr); + sr->epilogue_count = 0; + sr->epilogue_start = UNW_WHEN_NEVER; + + if (!body) + push(sr); + + sr->region_start += sr->region_len; + sr->region_len = rlen; + sr->in_body = body; + + if (!body) { + for (i = 0; i < 4; ++i) { + if (mask & 0x8) + set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR, + sr->region_start + sr->region_len - 1, grsave++); + mask <<= 1; + } + sr->gr_save_loc = grsave; + sr->any_spills = 0; + sr->imask = 0; + sr->spill_offset = 0x10; /* default to psp+16 */ + } +} + +/* + * Prologue descriptors. + */ + +static inline void +desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr) +{ + if (abi == 0 && context == 'i') + sr->flags |= UNW_FLAG_INTERRUPT_FRAME; + else + dprintk("unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context); +} + +static inline void +desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 5; ++i) { + if (brmask & 1) + set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR, + sr->region_start + sr->region_len - 1, gr++); + brmask >>= 1; + } +} + +static inline void +desc_br_mem (unsigned char brmask, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 5; ++i) { + if (brmask & 1) { + set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME, + sr->region_start + sr->region_len - 1, 0); + sr->any_spills = 1; + } + brmask >>= 1; + } +} + +static inline void +desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 4; ++i) { + if ((grmask & 1) != 0) { + 
set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, + sr->region_start + sr->region_len - 1, 0); + sr->any_spills = 1; + } + grmask >>= 1; + } + for (i = 0; i < 20; ++i) { + if ((frmask & 1) != 0) { + set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, + sr->region_start + sr->region_len - 1, 0); + sr->any_spills = 1; + } + frmask >>= 1; + } +} + +static inline void +desc_fr_mem (unsigned char frmask, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 4; ++i) { + if ((frmask & 1) != 0) { + set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, + sr->region_start + sr->region_len - 1, 0); + sr->any_spills = 1; + } + frmask >>= 1; + } +} + +static inline void +desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 4; ++i) { + if ((grmask & 1) != 0) + set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR, + sr->region_start + sr->region_len - 1, gr++); + grmask >>= 1; + } +} + +static inline void +desc_gr_mem (unsigned char grmask, struct unw_state_record *sr) +{ + int i; + + for (i = 0; i < 4; ++i) { + if ((grmask & 1) != 0) { + set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, + sr->region_start + sr->region_len - 1, 0); + sr->any_spills = 1; + } + grmask >>= 1; + } +} + +static inline void +desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr) +{ + set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE, + sr->region_start + MIN((int)t, sr->region_len - 1), 16*size); +} + +static inline void +desc_mem_stack_v (unw_word t, struct unw_state_record *sr) +{ + sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1); +} + +static inline void +desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr) +{ + set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst); +} + +static inline void +desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr) +{ + 
set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1, + 0x10 - 4*pspoff); +} + +static inline void +desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr) +{ + set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1, + 4*spoff); +} + +static inline void +desc_rp_br (unsigned char dst, struct unw_state_record *sr) +{ + sr->return_link_reg = dst; +} + +static inline void +desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr) +{ + struct unw_reg_info *reg = sr->curr.reg + regnum; + + if (reg->where == UNW_WHERE_NONE) + reg->where = UNW_WHERE_GR_SAVE; + reg->when = sr->region_start + MIN((int)t, sr->region_len - 1); +} + +static inline void +desc_spill_base (unw_word pspoff, struct unw_state_record *sr) +{ + sr->spill_offset = 0x10 - 4*pspoff; +} + +static inline unsigned char * +desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr) +{ + sr->imask = imaskp; + return imaskp + (2*sr->region_len + 7)/8; +} + +/* + * Body descriptors. + */ +static inline void +desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr) +{ + sr->epilogue_start = sr->region_start + sr->region_len - 1 - t; + sr->epilogue_count = ecount + 1; +} + +static inline void +desc_copy_state (unw_word label, struct unw_state_record *sr) +{ + struct unw_reg_state *rs; + + for (rs = sr->reg_state_list; rs; rs = rs->next) { + if (rs->label == label) { + memcpy (&sr->curr, rs, sizeof(sr->curr)); + return; + } + } + printk("unwind: failed to find state labelled 0x%lx\n", label); +} + +static inline void +desc_label_state (unw_word label, struct unw_state_record *sr) +{ + struct unw_reg_state *rs; + + rs = alloc_reg_state(); + memcpy(rs, &sr->curr, sizeof(*rs)); + rs->label = label; + rs->next = sr->reg_state_list; + sr->reg_state_list = rs; +} + +/* + * General descriptors. 
+ */ + +static inline int +desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) +{ + if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1)) + return 0; + if (qp > 0) { + if ((sr->pr_val & (1UL << qp)) == 0) + return 0; + sr->pr_mask |= (1UL << qp); + } + return 1; +} + +static inline void +desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) +{ + struct unw_reg_info *r; + + if (!desc_is_active(qp, t, sr)) + return; + + r = sr->curr.reg + decode_abreg(abreg, 0); + r->where = UNW_WHERE_NONE; + r->when = sr->region_start + MIN((int)t, sr->region_len - 1); + r->val = 0; +} + +static inline void +desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, + unsigned char ytreg, struct unw_state_record *sr) +{ + enum unw_where where = UNW_WHERE_GR; + struct unw_reg_info *r; + + if (!desc_is_active(qp, t, sr)) + return; + + if (x) + where = UNW_WHERE_BR; + else if (ytreg & 0x80) + where = UNW_WHERE_FR; + + r = sr->curr.reg + decode_abreg(abreg, 0); + r->where = where; + r->when = sr->region_start + MIN((int)t, sr->region_len - 1); + r->val = (ytreg & 0x7f); +} + +static inline void +desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, + struct unw_state_record *sr) +{ + struct unw_reg_info *r; + + if (!desc_is_active(qp, t, sr)) + return; + + r = sr->curr.reg + decode_abreg(abreg, 1); + r->where = UNW_WHERE_PSPREL; + r->when = sr->region_start + MIN((int)t, sr->region_len - 1); + r->val = 0x10 - 4*pspoff; +} + +static inline void +desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, + struct unw_state_record *sr) +{ + struct unw_reg_info *r; + + if (!desc_is_active(qp, t, sr)) + return; + + r = sr->curr.reg + decode_abreg(abreg, 1); + r->where = UNW_WHERE_SPREL; + r->when = sr->region_start + MIN((int)t, sr->region_len - 1); + r->val = 4*spoff; +} + +#define UNW_DEC_BAD_CODE(code) 
printk("unwind: unknown code 0x%02x\n", code); + +/* + * region headers: + */ +#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) +#define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) +/* + * prologue descriptors: + */ +#define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) +#define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) +#define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) +#define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) +#define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) +#define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) +#define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) +#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) +#define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) +#define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) +#define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) +#define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) +#define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) +#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg) +#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg) +#define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg) +#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg) +#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg) +#define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) +#define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) +#define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) +/* + * body descriptors: + */ +#define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) +#define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) +#define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) +/* + * general unwind descriptors: + */ +#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) +#define 
UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) +#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg) +#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg) +#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) +#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) +#define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) +#define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) + +#include "unwind_decoder.c" + + +/* Unwind scripts. */ + +static inline unw_hash_index_t +hash (unsigned long ip) +{ +# define magic 0x9e3779b97f4a7c16 /* (sqrt(5)/2-1)*2^64 */ + + return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE); +} + +static inline long +cache_match (struct unw_script *script, unsigned long ip, unsigned long pr_val) +{ + read_lock(&script->lock); + if ((ip) == (script)->ip && (((pr_val) ^ (script)->pr_val) & (script)->pr_mask) == 0) + /* keep the read lock... */ + return 1; + read_unlock(&script->lock); + return 0; +} + +static inline struct unw_script * +script_lookup (struct unw_frame_info *info) +{ + struct unw_script *script = unw.cache + info->hint; + unsigned long ip, pr_val; + + STAT(++unw.stat.cache.lookups); + + ip = info->ip; + pr_val = info->pr_val; + + if (cache_match(script, ip, pr_val)) { + STAT(++unw.stat.cache.hinted_hits); + return script; + } + + script = unw.cache + unw.hash[hash(ip)]; + while (1) { + if (cache_match(script, ip, pr_val)) { + /* update hint; no locking required as single-word writes are atomic */ + STAT(++unw.stat.cache.normal_hits); + unw.cache[info->prev_script].hint = script - unw.cache; + return script; + } + if (script->coll_chain >= UNW_HASH_SIZE) + return 0; + script = unw.cache + script->coll_chain; + STAT(++unw.stat.cache.collision_chain_traversals); + } +} + +/* + * On returning, a write lock for the SCRIPT is still being held. 
+ */ +static inline struct unw_script * +script_new (unsigned long ip) +{ + struct unw_script *script, *prev, *tmp; + unsigned short head; + unsigned long flags; + unsigned char index; + + STAT(++unw.stat.script.news); + + /* + * Atomically fetch the least recently used script. We can't + * do this via unw.lock because we also need to acquire the + * script's lock and to avoid deadlock, we must acquire the + * latter before the former. + */ + do { + head = unw.lru_head; + } while (cmpxchg(&unw.lru_head, head, unw.cache[head].lru_chain) != head); + + script = unw.cache + head; + + write_lock(&script->lock); + + spin_lock_irqsave(&unw.lock, flags); + { + /* re-insert script at the tail of the LRU chain: */ + unw.cache[unw.lru_tail].lru_chain = head; + unw.lru_tail = head; + + /* remove the old script from the hash table (if it's there): */ + index = hash(script->ip); + tmp = unw.cache + unw.hash[index]; + prev = 0; + while (1) { + if (tmp == script) { + if (prev) + prev->coll_chain = tmp->coll_chain; + else + unw.hash[index] = tmp->coll_chain; + break; + } else + prev = tmp; + if (tmp->coll_chain >= UNW_CACHE_SIZE) + /* old script wasn't in the hash-table */ + break; + tmp = unw.cache + tmp->coll_chain; + } + + /* enter new script in the hash table */ + index = hash(ip); + script->coll_chain = unw.hash[index]; + unw.hash[index] = script - unw.cache; + + script->ip = ip; /* set new IP while we're holding the locks */ + + STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions); + } + spin_unlock_irqrestore(&unw.lock, flags); + + script->flags = 0; + script->hint = 0; + script->count = 0; + return script; +} + +static void +script_finalize (struct unw_script *script, struct unw_state_record *sr) +{ + script->pr_mask = sr->pr_mask; + script->pr_val = sr->pr_val; + /* + * We could down-grade our write-lock on script->lock here but + * the rwlock API doesn't offer atomic lock downgrading, so + * we'll just keep the write-lock and release it later when + 
* we're done using the script. + */ +} + +static inline void +script_emit (struct unw_script *script, struct unw_insn insn) +{ + if (script->count >= UNW_MAX_SCRIPT_LEN) { + dprintk("unwind: script exceeds maximum size of %u instructions!\n", + UNW_MAX_SCRIPT_LEN); + return; + } + script->insn[script->count++] = insn; +} + +static inline void +emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script) +{ + struct unw_reg_info *r = sr->curr.reg + i; + enum unw_insn_opcode opc; + struct unw_insn insn; + unsigned long val; + + switch (r->where) { + case UNW_WHERE_GR: + if (r->val >= 32) { + /* register got spilled to a stacked register */ + opc = UNW_INSN_SETNAT_TYPE; + val = UNW_NAT_STACKED; + } else { + /* register got spilled to a scratch register */ + opc = UNW_INSN_SETNAT_TYPE; + val = UNW_NAT_SCRATCH; + } + break; + + case UNW_WHERE_FR: + opc = UNW_INSN_SETNAT_TYPE; + val = UNW_NAT_VAL; + break; + + case UNW_WHERE_BR: + opc = UNW_INSN_SETNAT_TYPE; + val = UNW_NAT_NONE; + break; + + case UNW_WHERE_PSPREL: + case UNW_WHERE_SPREL: + opc = UNW_INSN_SETNAT_PRI_UNAT; + val = 0; + break; + + default: + dprintk("unwind: don't know how to emit nat info for where = %u\n", r->where); + return; + } + insn.opc = opc; + insn.dst = unw.preg_index[i]; + insn.val = val; + script_emit(script, insn); +} + +static void +compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) +{ + struct unw_reg_info *r = sr->curr.reg + i; + enum unw_insn_opcode opc; + unsigned long val, rval; + struct unw_insn insn; + long need_nat_info; + + if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target) + return; + + opc = UNW_INSN_MOVE; + val = rval = r->val; + need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7); + + switch (r->where) { + case UNW_WHERE_GR: + if (rval >= 32) { + opc = UNW_INSN_MOVE_STACKED; + val = rval - 32; + } else if (rval >= 4 && rval <= 7) { + if (need_nat_info) { + opc = UNW_INSN_MOVE2; + need_nat_info = 0; + } + val = 
unw.preg_index[UNW_REG_R4 + (rval - 4)]; + } else { + opc = UNW_INSN_LOAD_SPREL; + val = -sizeof(struct pt_regs); + if (rval >= 1 && rval <= 3) + val += struct_offset(struct pt_regs, r1) + 8*(rval - 1); + else if (rval <= 11) + val += struct_offset(struct pt_regs, r8) + 8*(rval - 8); + else if (rval <= 15) + val += struct_offset(struct pt_regs, r12) + 8*(rval - 12); + else if (rval <= 31) + val += struct_offset(struct pt_regs, r16) + 8*(rval - 16); + else + dprintk("unwind: bad scratch reg r%lu\n", rval); + } + break; + + case UNW_WHERE_FR: + if (rval <= 5) + val = unw.preg_index[UNW_REG_F2 + (rval - 1)]; + else if (rval >= 16 && rval <= 31) + val = unw.preg_index[UNW_REG_F16 + (rval - 16)]; + else { + opc = UNW_INSN_LOAD_SPREL; + val = -sizeof(struct pt_regs); + if (rval <= 9) + val += struct_offset(struct pt_regs, f6) + 16*(rval - 6); + else + dprintk("unwind: kernel may not touch f%lu\n", rval); + } + break; + + case UNW_WHERE_BR: + if (rval >= 1 && rval <= 5) + val = unw.preg_index[UNW_REG_B1 + (rval - 1)]; + else { + opc = UNW_INSN_LOAD_SPREL; + val = -sizeof(struct pt_regs); + if (rval == 0) + val += struct_offset(struct pt_regs, b0); + else if (rval == 6) + val += struct_offset(struct pt_regs, b6); + else + val += struct_offset(struct pt_regs, b7); + } + break; + + case UNW_WHERE_SPREL: + opc = UNW_INSN_LOAD_SPREL; + break; + + case UNW_WHERE_PSPREL: + opc = UNW_INSN_LOAD_PSPREL; + break; + + default: + dprintk("unwind: register %u has unexpected `where' value of %u\n", i, r->where); + break; + } + insn.opc = opc; + insn.dst = unw.preg_index[i]; + insn.val = val; + script_emit(script, insn); + if (need_nat_info) + emit_nat_info(sr, i, script); +} + +static inline struct unw_table_entry * +lookup (struct unw_table *table, unsigned long rel_ip) +{ + struct unw_table_entry *e = 0; + unsigned long lo, hi, mid; + + /* do a binary search for right entry: */ + for (lo = 0, hi = table->length; lo < hi; ) { + mid = (lo + hi) / 2; + e = &table->array[mid]; + if 
(rel_ip < e->start_offset) + hi = mid; + else if (rel_ip >= e->end_offset) + lo = mid + 1; + else + break; + } + return e; +} + +/* + * Build an unwind script that unwinds from state OLD_STATE to the + * entrypoint of the function that called OLD_STATE. + */ +static inline struct unw_script * +build_script (struct unw_frame_info *info) +{ + struct unw_reg_state *rs, *next; + struct unw_table_entry *e = 0; + struct unw_script *script = 0; + unsigned long ip = info->ip; + struct unw_state_record sr; + struct unw_table *table; + struct unw_reg_info *r; + struct unw_insn insn; + u8 *dp, *desc_end; + u64 hdr; + int i; + STAT(unsigned long start, parse_start;) + + STAT(++unw.stat.script.builds; start = ia64_get_itc()); + + /* build state record */ + memset(&sr, 0, sizeof(sr)); + for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) + r->when = UNW_WHEN_NEVER; + sr.pr_val = info->pr_val; + + script = script_new(ip); + if (!script) { + dprintk("unwind: failed to create unwind script\n"); + STAT(unw.stat.script.build_time += ia64_get_itc() - start); + return 0; + } + unw.cache[info->prev_script].hint = script - unw.cache; + + /* search the kernels and the modules' unwind tables for IP: */ + + STAT(parse_start = ia64_get_itc()); + + for (table = unw.tables; table; table = table->next) { + if (ip >= table->start && ip < table->end) { + e = lookup(table, ip - table->segment_base); + break; + } + } + if (!e) { + /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ + dprintk("unwind: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", ip, + unw.cache[info->prev_script].ip); + sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; + sr.curr.reg[UNW_REG_RP].when = -1; + sr.curr.reg[UNW_REG_RP].val = 0; + compile_reg(&sr, UNW_REG_RP, script); + script_finalize(script, &sr); + STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); + STAT(unw.stat.script.build_time += ia64_get_itc() - start); + return script; + } + + sr.when_target = (3*((ip & ~0xfUL) - 
(table->segment_base + e->start_offset))/16 + + (ip & 0xfUL)); + hdr = *(u64 *) (table->segment_base + e->info_offset); + dp = (u8 *) (table->segment_base + e->info_offset + 8); + desc_end = dp + 8*UNW_LENGTH(hdr); + + while (!sr.done && dp < desc_end) + dp = unw_decode(dp, sr.in_body, &sr); + + if (sr.when_target > sr.epilogue_start) { + /* + * sp has been restored and all values on the memory stack below + * psp also have been restored. + */ + sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE; + sr.curr.reg[UNW_REG_PSP].val = 0; + for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) + if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10) + || r->where == UNW_WHERE_SPREL) + r->where = UNW_WHERE_NONE; + } + + script->flags = sr.flags; + + /* + * If RP did't get saved, generate entry for the return link + * register. + */ + if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) { + sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; + sr.curr.reg[UNW_REG_RP].when = -1; + sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; + } + +#if UNW_DEBUG + printk ("unwind: state record for func 0x%lx, t=%u:\n", + table->segment_base + e->start_offset, sr.when_target); + for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { + if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { + printk(" %s <- ", unw.preg_name[r - sr.curr.reg]); + switch (r->where) { + case UNW_WHERE_GR: printk("r%lu", r->val); break; + case UNW_WHERE_FR: printk("f%lu", r->val); break; + case UNW_WHERE_BR: printk("b%lu", r->val); break; + case UNW_WHERE_SPREL: printk("[sp+0x%lx]", r->val); break; + case UNW_WHERE_PSPREL: printk("[psp+0x%lx]", r->val); break; + case UNW_WHERE_NONE: + printk("%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val); + break; + default: printk("BADWHERE(%d)", r->where); break; + } + printk ("\t\t%d\n", r->when); + } + } +#endif + + STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); + + /* translate state record into unwinder instructions: */ + + if 
(sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE + && sr.when_target > sr.curr.reg[UNW_REG_PSP].when && sr.curr.reg[UNW_REG_PSP].val != 0) + { + /* new psp is sp plus frame size */ + insn.opc = UNW_INSN_ADD; + insn.dst = unw.preg_index[UNW_REG_PSP]; + insn.val = sr.curr.reg[UNW_REG_PSP].val; + script_emit(script, insn); + } + + /* determine where the primary UNaT is: */ + if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) + i = UNW_REG_PRI_UNAT_MEM; + else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when) + i = UNW_REG_PRI_UNAT_GR; + else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) + i = UNW_REG_PRI_UNAT_MEM; + else + i = UNW_REG_PRI_UNAT_GR; + + compile_reg(&sr, i, script); + + for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i) + compile_reg(&sr, i, script); + + /* free labelled register states & stack: */ + + STAT(parse_start = ia64_get_itc()); + for (rs = sr.reg_state_list; rs; rs = next) { + next = rs->next; + free_reg_state(rs); + } + while (sr.stack) + pop(&sr); + STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); + + script_finalize(script, &sr); + STAT(unw.stat.script.build_time += ia64_get_itc() - start); + return script; +} + +/* + * Apply the unwinding actions represented by OPS and update SR to + * reflect the state that existed upon entry to the function that this + * unwinder represents. 
+ */ +static inline void +run_script (struct unw_script *script, struct unw_frame_info *state) +{ + struct unw_insn *ip, *limit, next_insn; + unsigned long opc, dst, val, off; + unsigned long *s = (unsigned long *) state; + STAT(unsigned long start;) + + STAT(++unw.stat.script.runs; start = ia64_get_itc()); + state->flags = script->flags; + ip = script->insn; + limit = script->insn + script->count; + next_insn = *ip; + + while (ip++ < limit) { + opc = next_insn.opc; + dst = next_insn.dst; + val = next_insn.val; + next_insn = *ip; + + redo: + switch (opc) { + case UNW_INSN_ADD: + s[dst] += val; + break; + + case UNW_INSN_MOVE2: + if (!s[val]) + goto lazy_init; + s[dst+1] = s[val+1]; + s[dst] = s[val]; + break; + + case UNW_INSN_MOVE: + if (!s[val]) + goto lazy_init; + s[dst] = s[val]; + break; + + case UNW_INSN_MOVE_STACKED: + s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp, + val); + break; + + case UNW_INSN_LOAD_PSPREL: + s[dst] = state->psp + val; + break; + + case UNW_INSN_LOAD_SPREL: + s[dst] = state->sp + val; + break; + + case UNW_INSN_SETNAT_PRI_UNAT: + if (!state->pri_unat) + state->pri_unat = &state->sw->caller_unat; + s[dst+1] = ((*state->pri_unat - s[dst]) << 32) | UNW_NAT_PRI_UNAT; + break; + + case UNW_INSN_SETNAT_TYPE: + s[dst+1] = val; + break; + } + } + STAT(unw.stat.script.run_time += ia64_get_itc() - start); + return; + + lazy_init: + off = unw.sw_off[val]; + s[val] = (unsigned long) state->sw + off; + if (off >= struct_offset (struct unw_frame_info, r4) + && off <= struct_offset (struct unw_frame_info, r7)) + /* + * We're initializing a general register: init NaT info, too. 
Note that we + * rely on the fact that call_unat is the first field in struct switch_stack: + */ + s[val+1] = (-off << 32) | UNW_NAT_PRI_UNAT; + goto redo; +} + +static int +find_save_locs (struct unw_frame_info *info) +{ + int have_write_lock = 0; + struct unw_script *scr; + + if (info->ip & (my_cpu_data.unimpl_va_mask | 0xf)) { + /* don't let obviously bad addresses pollute the cache */ + dprintk("unwind: rejecting bad ip=0x%lx\n", info->ip); + info->rp = 0; + return -1; + } + + scr = script_lookup(info); + if (!scr) { + scr = build_script(info); + if (!scr) { + dprintk("unwind: failed to locate/build unwind script for ip %lx\n", + info->ip); + return -1; + } + have_write_lock = 1; + } + info->hint = scr->hint; + info->prev_script = scr - unw.cache; + + run_script(scr, info); + + if (have_write_lock) + write_unlock(&scr->lock); + else + read_unlock(&scr->lock); + return 0; +} + +int +unw_unwind (struct unw_frame_info *info) +{ + unsigned long prev_ip, prev_sp, prev_bsp; + unsigned long ip, pr, num_regs; + STAT(unsigned long start, flags;) + int retval; + + STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc()); + + prev_ip = info->ip; + prev_sp = info->sp; + prev_bsp = info->bsp; + + /* restore the ip */ + if (!info->rp) { + dprintk("unwind: failed to locate return link (ip=0x%lx)!\n", info->ip); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + ip = info->ip = *info->rp; + if (ip <= TASK_SIZE) { + dprintk("unwind: reached user-space (ip=0x%lx)\n", ip); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + + /* restore the cfm: */ + if (!info->pfs) { + dprintk("unwind: failed to locate ar.pfs!\n"); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + info->cfm = info->pfs; + + /* restore the bsp: */ + pr = info->pr_val; + num_regs = 0; + if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) { + 
if ((pr & (1UL << pNonSys)) != 0) + num_regs = *info->cfm & 0x7f; /* size of frame */ + info->pfs = + (unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs)); + } else + num_regs = (*info->cfm >> 7) & 0x7f; /* size of locals */ + info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); + if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { + dprintk("unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", + info->bsp, info->regstk.limit, info->regstk.top); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + + /* restore the sp: */ + info->sp = info->psp; + if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { + dprintk("unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n", + info->sp, info->regstk.top, info->regstk.limit); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + + if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { + dprintk("unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return -1; + } + + /* finally, restore the predicates: */ + unw_get_pr(info, &info->pr_val); + + retval = find_save_locs(info); + STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); + return retval; +} + +static void +unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) +{ + unsigned long rbslimit, rbstop, stklimit, stktop, sol; + STAT(unsigned long start, flags;) + + STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc()); + + /* + * Subtle stuff here: we _could_ unwind through the + * switch_stack frame but we don't want to do that because it + * would be slow as each preserved register would have to be + * processed. 
Instead, what we do here is zero out the frame + * info and start the unwind process at the function that + * created the switch_stack frame. When a preserved value in + * switch_stack needs to be accessed, run_script() will + * initialize the appropriate pointer on demand. + */ + memset(info, 0, sizeof(*info)); + + rbslimit = (unsigned long) t + IA64_RBS_OFFSET; + rbstop = sw->ar_bspstore; + if (rbstop - (unsigned long) t >= IA64_STK_OFFSET) + rbstop = rbslimit; + + stklimit = (unsigned long) t + IA64_STK_OFFSET; + stktop = (unsigned long) sw - 16; + if (stktop <= rbstop) + stktop = rbstop; + + info->regstk.limit = rbslimit; + info->regstk.top = rbstop; + info->memstk.limit = stklimit; + info->memstk.top = stktop; + info->sw = sw; + info->sp = info->psp = (unsigned long) (sw + 1) - 16; + info->cfm = &sw->ar_pfs; + sol = (*info->cfm >> 7) & 0x7f; + info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); + info->ip = sw->b0; + info->pr_val = sw->pr; + + find_save_locs(info); + STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); +} + +#endif /* CONFIG_IA64_NEW_UNWIND */ + void -ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, struct task_struct *t) +unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t) { struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); + +#ifdef CONFIG_IA64_NEW_UNWIND + unw_init_frame_info(info, t, sw); +#else unsigned long sol, limit, top; memset(info, 0, sizeof(*info)); @@ -22,17 +1715,25 @@ if (top - (unsigned long) t >= IA64_STK_OFFSET) top = limit; - info->regstk.limit = (unsigned long *) limit; - info->regstk.top = (unsigned long *) top; - info->bsp = ia64_rse_skip_regs(info->regstk.top, -sol); - info->top_rnat = sw->ar_rnat; - info->cfm = sw->ar_pfs; - info->ip = sw->b0; + info->regstk.limit = limit; + info->regstk.top = top; + info->sw = sw; + info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) 
info->regstk.top, -sol); + info->cfm = &sw->ar_pfs; + info->ip = sw->b0; +#endif } void -ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *regs) +unw_init_from_current (struct unw_frame_info *info, struct pt_regs *regs) { +#ifdef CONFIG_IA64_NEW_UNWIND + struct switch_stack *sw = (struct switch_stack *) regs - 1; + + unw_init_frame_info(info, current, sw); + /* skip over interrupt frame: */ + unw_unwind(info); +#else struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long sol, sof, *bsp, limit, top; @@ -44,34 +1745,40 @@ memset(info, 0, sizeof(*info)); sol = (sw->ar_pfs >> 7) & 0x7f; /* size of frame */ - info->regstk.limit = (unsigned long *) limit; - info->regstk.top = (unsigned long *) top; - info->top_rnat = sw->ar_rnat; /* this gives us the bsp top level frame (kdb interrupt frame): */ bsp = ia64_rse_skip_regs((unsigned long *) top, -sol); /* now skip past the interrupt frame: */ sof = regs->cr_ifs & 0x7f; /* size of frame */ - info->cfm = regs->cr_ifs; - info->bsp = ia64_rse_skip_regs(bsp, -sof); + + info->regstk.limit = limit; + info->regstk.top = top; + info->sw = sw; + info->bsp = (unsigned long) ia64_rse_skip_regs(bsp, -sof); + info->cfm = ®s->cr_ifs; info->ip = regs->cr_iip; +#endif } +#ifndef CONFIG_IA64_NEW_UNWIND + static unsigned long -read_reg (struct ia64_frame_info *info, int regnum, int *is_nat) +read_reg (struct unw_frame_info *info, int regnum, int *is_nat) { unsigned long *addr, *rnat_addr, rnat; - addr = ia64_rse_skip_regs(info->bsp, regnum); - if (addr < info->regstk.limit || addr >= info->regstk.top || ((long) addr & 0x7) != 0) { + addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum); + if ((unsigned long) addr < info->regstk.limit + || (unsigned long) addr >= info->regstk.top || ((long) addr & 0x7) != 0) + { *is_nat = 1; return 0xdeadbeefdeadbeef; } rnat_addr = ia64_rse_rnat_addr(addr); - if (rnat_addr >= info->regstk.top) - rnat = info->top_rnat; + if ((unsigned long) rnat_addr >= 
info->regstk.top) + rnat = info->sw->ar_rnat; else rnat = *rnat_addr; *is_nat = (rnat & (1UL << ia64_rse_slot_num(addr))) != 0; @@ -83,9 +1790,9 @@ * store for r32. */ int -ia64_unwind_to_previous_frame (struct ia64_frame_info *info) +unw_unwind (struct unw_frame_info *info) { - unsigned long sol, cfm = info->cfm; + unsigned long sol, cfm = *info->cfm; int is_nat; sol = (cfm >> 7) & 0x7f; /* size of locals */ @@ -103,16 +1810,187 @@ return -1; info->ip = read_reg(info, sol - 2, &is_nat); - if (is_nat) + if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf))) + /* reject let obviously bad addresses */ return -1; + info->cfm = ia64_rse_skip_regs((unsigned long *) info->bsp, sol - 1); cfm = read_reg(info, sol - 1, &is_nat); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; - info->cfm = cfm; - info->bsp = ia64_rse_skip_regs(info->bsp, -sol); + info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -sol); return 0; +} +#endif /* !CONFIG_IA64_NEW_UNWIND */ + +#ifdef CONFIG_IA64_NEW_UNWIND + +static void +init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base, + unsigned long gp, void *table_start, void *table_end) +{ + struct unw_table_entry *start = table_start, *end = table_end; + +#ifdef UNWIND_TABLE_SORT_BUG + { + struct unw_table_entry *e1, *e2, tmp; + + /* stupid bubble sort... 
*/ + + for (e1 = start; e1 < end; ++e1) { + for (e2 = e1 + 1; e2 < end; ++e2) { + if (e2->start_offset < e1->start_offset) { + tmp = *e1; + *e1 = *e2; + *e2 = tmp; + } + } + } + } +#endif + table->name = name; + table->segment_base = segment_base; + table->gp = gp; + table->start = segment_base + start[0].start_offset; + table->end = segment_base + end[-1].end_offset; + table->array = start; + table->length = end - start; +} + +void * +unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, + void *table_start, void *table_end) +{ + struct unw_table_entry *start = table_start, *end = table_end; + struct unw_table *table; + unsigned long flags; + + if (end - start <= 0) { + dprintk("unwind: ignoring attempt to insert empty unwind table\n"); + return 0; + } + + table = kmalloc(sizeof(*table), GFP_USER); + if (!table) + return 0; + + init_unwind_table(table, name, segment_base, gp, table_start, table_end); + + spin_lock_irqsave(&unw.lock, flags); + { + /* keep kernel unwind table at the front (it's searched most commonly): */ + table->next = unw.tables->next; + unw.tables->next = table; + } + spin_unlock_irqrestore(&unw.lock, flags); + + return table; +} + +void +unw_remove_unwind_table (void *handle) +{ + struct unw_table *table, *prevt; + struct unw_script *tmp, *prev; + unsigned long flags; + long index; + + if (!handle) { + dprintk("unwind: ignoring attempt to remove non-existent unwind table\n"); + return; + } + + table = handle; + if (table == &unw.kernel_table) { + dprintk("unwind: sorry, freeing the kernel's unwind table is a no-can-do!\n"); + return; + } + + spin_lock_irqsave(&unw.lock, flags); + { + /* first, delete the table: */ + + for (prevt = (struct unw_table *) &unw.tables; prevt; prevt = prevt->next) + if (prevt->next == table) + break; + if (!prevt) { + dprintk("unwind: failed to find unwind table %p\n", table); + spin_unlock_irqrestore(&unw.lock, flags); + return; + } + prevt->next = table->next; + + /* next, remove 
hash table entries for this table */ + + for (index = 0; index <= UNW_HASH_SIZE; ++index) { + if (unw.hash[index] >= UNW_CACHE_SIZE) + continue; + + tmp = unw.cache + unw.hash[index]; + prev = 0; + while (1) { + write_lock(&tmp->lock); + { + if (tmp->ip >= table->start && tmp->ip < table->end) { + if (prev) + prev->coll_chain = tmp->coll_chain; + else + unw.hash[index] = -1; + tmp->ip = 0; + } else + prev = tmp; + } + write_unlock(&tmp->lock); + } + } + } + spin_unlock_irqrestore(&unw.lock, flags); + + kfree(table); +} +#endif /* CONFIG_IA64_NEW_UNWIND */ + +void +unw_init (void) +{ +#ifdef CONFIG_IA64_NEW_UNWIND + extern int ia64_unw_start, ia64_unw_end, __gp; + extern void unw_hash_index_t_is_too_narrow (void); + long i, off; + + if (8*sizeof (unw_hash_index_t) < UNW_LOG_HASH_SIZE) + unw_hash_index_t_is_too_narrow(); + + unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT); + unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE); + unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT); + unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0); + unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT); + unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR); + unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC); + unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR); + for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8) + unw.sw_off[unw.preg_index[i]] = off; + for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8) + unw.sw_off[unw.preg_index[i]] = off; + for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16) + unw.sw_off[unw.preg_index[i]] = off; + for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16) + unw.sw_off[unw.preg_index[i]] = off; + + unw.cache[0].coll_chain = -1; + for (i = 1; i < UNW_CACHE_SIZE; ++i) { + unw.cache[i].lru_chain = (i - 1); + unw.cache[i].coll_chain = -1; + unw.cache[i].lock = RW_LOCK_UNLOCKED; + } + unw.lru_head = UNW_CACHE_SIZE - 1; + unw.lru_tail = 0; + + 
init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp, + &ia64_unw_start, &ia64_unw_end); +#endif /* CONFIG_IA64_NEW_UNWIND */ } diff -urN linux-2.4.0-test1/arch/ia64/kernel/unwind_decoder.c linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_decoder.c --- linux-2.4.0-test1/arch/ia64/kernel/unwind_decoder.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_decoder.c Tue May 23 17:35:51 2000 @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2000 Hewlett-Packard Co + * Copyright (C) 2000 David Mosberger-Tang + * + * Generic IA-64 unwind info decoder. + * + * This file is used both by the Linux kernel and objdump. Please keep + * the two copies of this file in sync. + * + * You need to customize the decoder by defining the following + * macros/constants before including this file: + * + * Types: + * unw_word Unsigned integer type with at least 64 bits + * + * Register names: + * UNW_REG_BSP + * UNW_REG_BSPSTORE + * UNW_REG_FPSR + * UNW_REG_LC + * UNW_REG_PFS + * UNW_REG_PR + * UNW_REG_RNAT + * UNW_REG_PSP + * UNW_REG_RP + * UNW_REG_UNAT + * + * Decoder action macros: + * UNW_DEC_BAD_CODE(code) + * UNW_DEC_ABI(fmt,abi,context,arg) + * UNW_DEC_BR_GR(fmt,brmask,gr,arg) + * UNW_DEC_BR_MEM(fmt,brmask,arg) + * UNW_DEC_COPY_STATE(fmt,label,arg) + * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) + * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) + * UNW_DEC_FR_MEM(fmt,frmask,arg) + * UNW_DEC_GR_GR(fmt,grmask,gr,arg) + * UNW_DEC_GR_MEM(fmt,grmask,arg) + * UNW_DEC_LABEL_STATE(fmt,label,arg) + * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) + * UNW_DEC_MEM_STACK_V(fmt,t,arg) + * UNW_DEC_PRIUNAT_GR(fmt,r,arg) + * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) + * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) + * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) + * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) + * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) + * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) + * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) + * UNW_DEC_REG_REG(fmt,src,dst,arg) + * 
UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) + * UNW_DEC_REG_WHEN(fmt,reg,t,arg) + * UNW_DEC_RESTORE(fmt,t,abreg,arg) + * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) + * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) + * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) + * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) + * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) + * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) + * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) + * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) + * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) + */ + +static unw_word +unw_decode_uleb128 (unsigned char **dpp) +{ + unsigned shift = 0; + unw_word byte, result = 0; + unsigned char *bp = *dpp; + + while (1) + { + byte = *bp++; + result |= (byte & 0x7f) << shift; + if ((byte & 0x80) == 0) + break; + shift += 7; + } + *dpp = bp; + return result; +} + +static unsigned char * +unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char byte1, abreg; + unw_word t, off; + + byte1 = *dp++; + t = unw_decode_uleb128 (&dp); + off = unw_decode_uleb128 (&dp); + abreg = (byte1 & 0x7f); + if (byte1 & 0x80) + UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg); + else + UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg); + return dp; +} + +static unsigned char * +unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char byte1, byte2, abreg, x, ytreg; + unw_word t; + + byte1 = *dp++; byte2 = *dp++; + t = unw_decode_uleb128 (&dp); + abreg = (byte1 & 0x7f); + ytreg = byte2; + x = (byte1 >> 7) & 1; + if ((byte1 & 0x80) == 0 && ytreg == 0) + UNW_DEC_RESTORE(X2, t, abreg, arg); + else + UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg); + return dp; +} + +static unsigned char * +unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char byte1, byte2, abreg, qp; + unw_word t, off; + + byte1 = *dp++; byte2 = *dp++; + t = unw_decode_uleb128 (&dp); + off = unw_decode_uleb128 (&dp); + + qp = (byte1 & 0x3f); + abreg = (byte2 & 0x7f); + + if (byte1 & 0x80) + 
UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); + else + UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); + return dp; +} + +static unsigned char * +unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; + unw_word t; + + byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; + t = unw_decode_uleb128 (&dp); + + qp = (byte1 & 0x3f); + abreg = (byte2 & 0x7f); + x = (byte2 >> 7) & 1; + ytreg = byte3; + + if ((byte2 & 0x80) == 0 && byte3 == 0) + UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); + else + UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); + return dp; +} + +static unsigned char * +unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg) +{ + int body = (code & 0x20) != 0; + unw_word rlen; + + rlen = (code & 0x1f); + UNW_DEC_PROLOGUE(R1, body, rlen, arg); + return dp; +} + +static unsigned char * +unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char byte1, mask, grsave; + unw_word rlen; + + byte1 = *dp++; + + mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); + grsave = (byte1 & 0x7f); + rlen = unw_decode_uleb128 (&dp); + UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg); + return dp; +} + +static unsigned char * +unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg) +{ + unw_word rlen; + + rlen = unw_decode_uleb128 (&dp); + UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg); + return dp; +} + +static unsigned char * +unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char brmask = (code & 0x1f); + + UNW_DEC_BR_MEM(P1, brmask, arg); + return dp; +} + +static unsigned char * +unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg) +{ + if ((code & 0x10) == 0) + { + unsigned char byte1 = *dp++; + + UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1), + (byte1 & 0x7f), arg); + } + else if ((code & 0x08) == 0) + { + unsigned char byte1 = *dp++, r, dst; + + r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); + 
dst = (byte1 & 0x7f); + switch (r) + { + case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break; + case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break; + case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break; + case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break; + case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break; + case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break; + case 6: UNW_DEC_RP_BR(P3, dst, arg); break; + case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break; + case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break; + case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break; + case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break; + case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break; + default: UNW_DEC_BAD_CODE(r); break; + } + } + else if ((code & 0x7) == 0) + UNW_DEC_SPILL_MASK(P4, dp, arg); + else if ((code & 0x7) == 1) + { + unw_word grmask, frmask, byte1, byte2, byte3; + + byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; + grmask = ((byte1 >> 4) & 0xf); + frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3; + UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg); + } + else + UNW_DEC_BAD_CODE(code); + return dp; +} + +static unsigned char * +unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg) +{ + int gregs = (code & 0x10) != 0; + unsigned char mask = (code & 0x0f); + + if (gregs) + UNW_DEC_GR_MEM(P6, mask, arg); + else + UNW_DEC_FR_MEM(P6, mask, arg); + return dp; +} + +static unsigned char * +unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg) +{ + unsigned char r, byte1, byte2; + unw_word t, size; + + if ((code & 0x10) == 0) + { + r = (code & 0xf); + t = unw_decode_uleb128 (&dp); + switch (r) + { + case 0: + size = unw_decode_uleb128 (&dp); + UNW_DEC_MEM_STACK_F(P7, t, size, arg); + break; + + case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break; + case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break; + case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break; + case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, 
t, arg); break; + case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break; + case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break; + case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break; + case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break; + case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break; + case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break; + case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break; + case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break; + case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break; + case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break; + case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break; + default: UNW_DEC_BAD_CODE(r); break; + } + } + else + { + switch (code & 0xf) + { + case 0x0: /* p8 */ + { + r = *dp++; + t = unw_decode_uleb128 (&dp); + switch (r) + { + case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break; + case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break; + case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break; + case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break; + case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break; + case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break; + case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break; + case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break; + case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break; + case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break; + case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break; + case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break; + case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break; + case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break; + case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break; + case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break; + case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break; + case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break; + case 19: 
UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break; + default: UNW_DEC_BAD_CODE(r); break; + } + } + break; + + case 0x1: + byte1 = *dp++; byte2 = *dp++; + UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg); + break; + + case 0xf: /* p10 */ + byte1 = *dp++; byte2 = *dp++; + UNW_DEC_ABI(P10, byte1, byte2, arg); + break; + + case 0x9: + return unw_decode_x1 (dp, code, arg); + + case 0xa: + return unw_decode_x2 (dp, code, arg); + + case 0xb: + return unw_decode_x3 (dp, code, arg); + + case 0xc: + return unw_decode_x4 (dp, code, arg); + + default: + UNW_DEC_BAD_CODE(code); + break; + } + } + return dp; +} + +static unsigned char * +unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg) +{ + unw_word label = (code & 0x1f); + + if ((code & 0x20) != 0) + UNW_DEC_COPY_STATE(B1, label, arg); + else + UNW_DEC_LABEL_STATE(B1, label, arg); + return dp; +} + +static unsigned char * +unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg) +{ + unw_word t; + + t = unw_decode_uleb128 (&dp); + UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg); + return dp; +} + +static unsigned char * +unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) +{ + unw_word t, ecount, label; + + if ((code & 0x10) == 0) + { + t = unw_decode_uleb128 (&dp); + ecount = unw_decode_uleb128 (&dp); + UNW_DEC_EPILOGUE(B3, t, ecount, arg); + } + else if ((code & 0x07) == 0) + { + label = unw_decode_uleb128 (&dp); + if ((code & 0x08) != 0) + UNW_DEC_COPY_STATE(B4, label, arg); + else + UNW_DEC_LABEL_STATE(B4, label, arg); + } + else + switch (code & 0x7) + { + case 1: return unw_decode_x1 (dp, code, arg); + case 2: return unw_decode_x2 (dp, code, arg); + case 3: return unw_decode_x3 (dp, code, arg); + case 4: return unw_decode_x4 (dp, code, arg); + default: UNW_DEC_BAD_CODE(code); break; + } + return dp; +} + +typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *); + +static unw_decoder unw_decode_table[2][8] = +{ + /* prologue table: */ + { + unw_decode_r1, /* 0 */ + 
unw_decode_r1, + unw_decode_r2, + unw_decode_r3, + unw_decode_p1, /* 4 */ + unw_decode_p2_p5, + unw_decode_p6, + unw_decode_p7_p10 + }, + { + unw_decode_r1, /* 0 */ + unw_decode_r1, + unw_decode_r2, + unw_decode_r3, + unw_decode_b1, /* 4 */ + unw_decode_b1, + unw_decode_b2, + unw_decode_b3_x4 + } +}; + +/* + * Decode one descriptor and return address of next descriptor. + */ +static inline unsigned char * +unw_decode (unsigned char *dp, int inside_body, void *arg) +{ + unw_decoder decoder; + unsigned char code; + + code = *dp++; + decoder = unw_decode_table[inside_body][code >> 5]; + dp = (*decoder) (dp, code, arg); + return dp; +} diff -urN linux-2.4.0-test1/arch/ia64/kernel/unwind_i.h linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_i.h --- linux-2.4.0-test1/arch/ia64/kernel/unwind_i.h Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/kernel/unwind_i.h Thu Jun 1 01:08:05 2000 @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2000 Hewlett-Packard Co + * Copyright (C) 2000 David Mosberger-Tang + * + * Kernel unwind support. 
+ */ + +#define UNW_VER(x) ((x) >> 48) +#define UNW_FLAG_MASK 0x0000ffff00000000 +#define UNW_FLAG_OSMASK 0x0000f00000000000 +#define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L) +#define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L) +#define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL) + +enum unw_register_index { + /* primary unat: */ + UNW_REG_PRI_UNAT_GR, + UNW_REG_PRI_UNAT_MEM, + + /* register stack */ + UNW_REG_BSP, /* register stack pointer */ + UNW_REG_BSPSTORE, + UNW_REG_PFS, /* previous function state */ + UNW_REG_RNAT, + /* memory stack */ + UNW_REG_PSP, /* previous memory stack pointer */ + /* return pointer: */ + UNW_REG_RP, + + /* preserved registers: */ + UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7, + UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR, + UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5, + UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5, + UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19, + UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23, + UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27, + UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31, + UNW_NUM_REGS +}; + +struct unw_info_block { + u64 header; + u64 desc[0]; /* unwind descriptors */ + /* personality routine and language-specific data follow behind descriptors */ +}; + +struct unw_table_entry { + u64 start_offset; + u64 end_offset; + u64 info_offset; +}; + +struct unw_table { + struct unw_table *next; /* must be first member! 
*/ + const char *name; + unsigned long gp; /* global pointer for this load-module */ + unsigned long segment_base; /* base for offsets in the unwind table entries */ + unsigned long start; + unsigned long end; + struct unw_table_entry *array; + unsigned long length; +}; + +enum unw_where { + UNW_WHERE_NONE, /* register isn't saved at all */ + UNW_WHERE_GR, /* register is saved in a general register */ + UNW_WHERE_FR, /* register is saved in a floating-point register */ + UNW_WHERE_BR, /* register is saved in a branch register */ + UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */ + UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */ + /* + * At the end of each prologue these locations get resolved to + * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively: + */ + UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */ + UNW_WHERE_GR_SAVE /* register is saved in next general register */ +}; + +#define UNW_WHEN_NEVER 0x7fffffff + +struct unw_reg_info { + unsigned long val; /* save location: register number or offset */ + enum unw_where where; /* where the register gets saved */ + int when; /* when the register gets saved */ +}; + +struct unw_state_record { + unsigned int first_region : 1; /* is this the first region? */ + unsigned int done : 1; /* are we done scanning descriptors? */ + unsigned int any_spills : 1; /* got any register spills? */ + unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? 
*/ + unsigned long flags; /* see UNW_FLAG_* in unwind.h */ + + u8 *imask; /* imask of spill_mask record or NULL */ + unsigned long pr_val; /* predicate values */ + unsigned long pr_mask; /* predicate mask */ + long spill_offset; /* psp-relative offset for spill base */ + int region_start; + int region_len; + int epilogue_start; + int epilogue_count; + int when_target; + + u8 gr_save_loc; /* next general register to use for saving a register */ + u8 return_link_reg; /* branch register in which the return link is passed */ + + struct unw_reg_state { + struct unw_reg_state *next; + unsigned long label; /* label of this state record */ + struct unw_reg_info reg[UNW_NUM_REGS]; + } curr, *stack, *reg_state_list; +}; + +enum unw_nat_type { + UNW_NAT_NONE, /* NaT not represented */ + UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */ + UNW_NAT_PRI_UNAT, /* NaT value is in unat word at offset OFF */ + UNW_NAT_SCRATCH, /* NaT value is in scratch.pri_unat */ + UNW_NAT_STACKED /* NaT is in rnat */ +}; + +enum unw_insn_opcode { + UNW_INSN_ADD, /* s[dst] += val */ + UNW_INSN_MOVE, /* s[dst] = s[val] */ + UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */ + UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */ + UNW_INSN_LOAD_PSPREL, /* s[dst] = *(*s.psp + 8*val) */ + UNW_INSN_LOAD_SPREL, /* s[dst] = *(*s.sp + 8*val) */ + UNW_INSN_SETNAT_PRI_UNAT, /* s[dst+1].nat.type = PRI_UNAT; + s[dst+1].nat.off = *s.pri_unat - s[dst] */ + UNW_INSN_SETNAT_TYPE /* s[dst+1].nat.type = val */ +}; + +struct unw_insn { + unsigned int opc : 4; + unsigned int dst : 9; + signed int val : 19; +}; + +/* + * Preserved general static registers (r2-r5) give rise to two script + * instructions; everything else yields at most one instruction; at + * the end of the script, the psp gets popped, accounting for one more + * instruction. 
+ */ +#define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5) + +struct unw_script { + unsigned long ip; /* ip this script is for */ + unsigned long pr_mask; /* mask of predicates script depends on */ + unsigned long pr_val; /* predicate values this script is for */ + rwlock_t lock; + unsigned int flags; /* see UNW_FLAG_* in unwind.h */ + unsigned short lru_chain; /* used for least-recently-used chain */ + unsigned short coll_chain; /* used for hash collisions */ + unsigned short hint; /* hint for next script to try (or -1) */ + unsigned short count; /* number of instructions in script */ + struct unw_insn insn[UNW_MAX_SCRIPT_LEN]; +}; diff -urN linux-2.4.0-test1/arch/ia64/lib/Makefile linux-2.4.0-test1-lia/arch/ia64/lib/Makefile --- linux-2.4.0-test1/arch/ia64/lib/Makefile Thu Mar 30 16:56:04 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/Makefile Thu May 25 23:06:07 2000 @@ -5,15 +5,18 @@ .S.o: $(CC) $(AFLAGS) -c $< -o $@ -OBJS = __divdi3.o __divsi3.o __udivdi3.o __udivsi3.o \ +L_TARGET = lib.a + +L_OBJS = __divdi3.o __divsi3.o __udivdi3.o __udivsi3.o \ __moddi3.o __modsi3.o __umoddi3.o __umodsi3.o \ checksum.o clear_page.o csum_partial_copy.o copy_page.o \ copy_user.o clear_user.o memset.o strncpy_from_user.o \ strlen.o strlen_user.o strnlen_user.o \ flush.o do_csum.o -lib.a: $(OBJS) - $(AR) rcs lib.a $(OBJS) +LX_OBJS = io.o + +include $(TOPDIR)/Rules.make __divdi3.o: idiv.S $(CC) $(AFLAGS) -c -o $@ $< @@ -38,5 +41,3 @@ __umodsi3.o: idiv.S $(CC) $(AFLAGS) -c -DMODULO -DUNSIGNED -DSINGLE -c -o $@ $< - -include $(TOPDIR)/Rules.make diff -urN linux-2.4.0-test1/arch/ia64/lib/clear_page.S linux-2.4.0-test1-lia/arch/ia64/lib/clear_page.S --- linux-2.4.0-test1/arch/ia64/lib/clear_page.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/clear_page.S Thu May 25 23:06:16 2000 @@ -10,10 +10,11 @@ * Output: * none * - * Copyright (C) 1999 Hewlett-Packard Co + * Copyright (C) 1999-2000 Hewlett-Packard Co * Copyright (C) 1999 Stephane Eranian - * Copyright (C) 1999 David 
Mosberger-Tang + * Copyright (C) 1999-2000 David Mosberger-Tang */ +#include #include .text @@ -21,12 +22,14 @@ .psr lsb .lsb - .align 32 - .global clear_page - .proc clear_page -clear_page: +GLOBAL_ENTRY(clear_page) + UNW(.prologue) alloc r11=ar.pfs,1,0,0,0 + UNW(.save ar.lc, r16) mov r16=ar.lc // slow + + UNW(.body) + mov r17=PAGE_SIZE/32-1 // -1 = repeat/until ;; adds r18=16,in0 @@ -38,5 +41,4 @@ ;; mov ar.lc=r16 // restore lc br.ret.sptk.few rp - - .endp clear_page +END(clear_page) diff -urN linux-2.4.0-test1/arch/ia64/lib/clear_user.S linux-2.4.0-test1-lia/arch/ia64/lib/clear_user.S --- linux-2.4.0-test1/arch/ia64/lib/clear_user.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/clear_user.S Thu May 25 23:06:22 2000 @@ -11,6 +11,8 @@ * Copyright (C) 1999 Stephane Eranian */ +#include + // // arguments // @@ -23,11 +25,10 @@ #define cnt r16 #define buf2 r17 #define saved_lc r18 -#define saved_pr r19 -#define saved_pfs r20 -#define tmp r21 -#define len2 r22 -#define len3 r23 +#define saved_pfs r19 +#define tmp r20 +#define len2 r21 +#define len3 r22 // // Theory of operations: @@ -65,14 +66,14 @@ .psr lsb .lsb - .align 32 - .global __do_clear_user - .proc __do_clear_user - -__do_clear_user: +GLOBAL_ENTRY(__do_clear_user) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,2,0,0,0 cmp.eq p6,p0=r0,len // check for zero length + UNW(.save ar.lc, saved_lc) mov saved_lc=ar.lc // preserve ar.lc (slow) + .body ;; // avoid WAW on CFM adds tmp=-1,len // br.ctop is repeat/until mov ret0=len // return value is length at this point @@ -222,4 +223,4 @@ mov ret0=len mov ar.lc=saved_lc br.ret.dptk.few rp - .endp +END(__do_clear_user) diff -urN linux-2.4.0-test1/arch/ia64/lib/copy_page.S linux-2.4.0-test1-lia/arch/ia64/lib/copy_page.S --- linux-2.4.0-test1/arch/ia64/lib/copy_page.S Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/copy_page.S Thu May 25 23:06:29 2000 @@ -13,6 +13,7 @@ * Copyright (C) 1999 Hewlett-Packard Co * 
Copyright (C) 1999 Stephane Eranian */ +#include #include #define PIPE_DEPTH 6 @@ -32,19 +33,21 @@ .psr lsb .lsb - .align 32 - .global copy_page - .proc copy_page - -copy_page: +GLOBAL_ENTRY(copy_page) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7) .rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH] .rotp p[PIPE_DEPTH] + UNW(.save ar.lc, saved_lc) mov saved_lc=ar.lc // save ar.lc ahead of time + UNW(.save pr, saved_pr) mov saved_pr=pr // rotating predicates are preserved // resgisters we must save. + UNW(.body) + mov src1=in1 // initialize 1st stream source adds src2=8,in1 // initialize 2nd stream source mov lcount=PAGE_SIZE/16-1 // as many 16bytes as there are on a page @@ -87,5 +90,4 @@ mov ar.pfs=saved_pfs // restore ar.ec mov ar.lc=saved_lc // restore saved lc br.ret.sptk.few rp // bye... - - .endp copy_page +END(copy_page) diff -urN linux-2.4.0-test1/arch/ia64/lib/copy_user.S linux-2.4.0-test1-lia/arch/ia64/lib/copy_user.S --- linux-2.4.0-test1/arch/ia64/lib/copy_user.S Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/copy_user.S Thu May 25 23:06:38 2000 @@ -29,6 +29,8 @@ * - fix extraneous stop bit introduced by the EX() macro. 
*/ +#include + // The label comes first because our store instruction contains a comma // and confuse the preprocessor otherwise // @@ -81,10 +83,9 @@ .psr abi64 .psr lsb - .align 16 - .global __copy_user - .proc __copy_user -__copy_user: +GLOBAL_ENTRY(__copy_user) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7) .rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH] @@ -95,13 +96,17 @@ ;; // RAW of cfm when len=0 cmp.eq p8,p0=r0,len // check for zero length + UNW(.save ar.lc, saved_lc) mov saved_lc=ar.lc // preserve ar.lc (slow) (p8) br.ret.spnt.few rp // empty mempcy() ;; add enddst=dst,len // first byte after end of source add endsrc=src,len // first byte after end of destination + UNW(.save pr, saved_pr) mov saved_pr=pr // preserve predicates + UNW(.body) + mov dst1=dst // copy because of rotation mov ar.ec=PIPE_DEPTH mov pr.rot=1<<16 // p16=true all others are false @@ -400,7 +405,4 @@ mov ar.pfs=saved_pfs br.ret.dptk.few rp - - - .endp __copy_user - +END(__copy_user) diff -urN linux-2.4.0-test1/arch/ia64/lib/do_csum.S linux-2.4.0-test1-lia/arch/ia64/lib/do_csum.S --- linux-2.4.0-test1/arch/ia64/lib/do_csum.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/do_csum.S Thu May 25 23:06:47 2000 @@ -13,6 +13,8 @@ * */ +#include + // // Theory of operations: // The goal is to go as quickly as possible to the point where @@ -100,10 +102,9 @@ // unsigned long do_csum(unsigned char *buf,int len) - .align 32 - .global do_csum - .proc do_csum -do_csum: +GLOBAL_ENTRY(do_csum) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,2,8,0,8 .rotr p[4], result[3] @@ -125,6 +126,7 @@ ;; and lastoff=7,tmp1 // how many bytes off for last element andcm last=tmp2,tmp3 // address of word containing last byte + UNW(.save pr, saved_pr) mov saved_pr=pr // preserve predicates (rotation) ;; sub tmp3=last,first // tmp3=distance from first to last @@ -145,8 +147,12 @@ shl hmask=hmask,tmp2 // build head 
mask, mask off [0,firstoff[ ;; shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff] + UNW(.save ar.lc, saved_lc) mov saved_lc=ar.lc // save lc ;; + + UNW(.body) + (p8) and hmask=hmask,tmask // apply tail mask to head mask if 1 word only (p9) and p[1]=lastval,tmask // mask last it as appropriate shr.u tmp3=tmp3,3 // we do 8 bytes per loop @@ -228,3 +234,4 @@ mov ar.lc=saved_lc (p10) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes br.ret.sptk.few rp +END(do_csum) diff -urN linux-2.4.0-test1/arch/ia64/lib/flush.S linux-2.4.0-test1-lia/arch/ia64/lib/flush.S --- linux-2.4.0-test1/arch/ia64/lib/flush.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/flush.S Thu May 25 23:07:00 2000 @@ -1,9 +1,10 @@ /* * Cache flushing routines. * - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 David Mosberger-Tang + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang */ +#include #include .text @@ -11,12 +12,14 @@ .psr lsb .lsb - .align 16 - .global ia64_flush_icache_page - .proc ia64_flush_icache_page -ia64_flush_icache_page: +GLOBAL_ENTRY(ia64_flush_icache_page) + UNW(.prologue) alloc r2=ar.pfs,1,0,0,0 + UNW(.save ar.lc, r3) mov r3=ar.lc // save ar.lc + + .body + mov r8=PAGE_SIZE/64-1 // repeat/until loop ;; mov ar.lc=r8 @@ -34,4 +37,4 @@ ;; mov ar.lc=r3 // restore ar.lc br.ret.sptk.few rp - .endp ia64_flush_icache_page +END(ia64_flush_icache_page) diff -urN linux-2.4.0-test1/arch/ia64/lib/idiv.S linux-2.4.0-test1-lia/arch/ia64/lib/idiv.S --- linux-2.4.0-test1/arch/ia64/lib/idiv.S Tue Feb 8 12:01:59 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/idiv.S Thu May 25 23:07:06 2000 @@ -31,6 +31,7 @@ nops while maximizing parallelism */ +#include #include .text @@ -73,12 +74,10 @@ #define PASTE(a,b) PASTE1(a,b) #define NAME PASTE(PASTE(__,SGN),PASTE(OP,PASTE(PREC,3))) - .align 32 - .global NAME - .proc NAME -NAME: - +GLOBAL_ENTRY(NAME) + UNW(.prologue) alloc r2=ar.pfs,2,6,0,8 + UNW(.save pr, 
r18) mov r18=pr #ifdef SINGLE # ifdef UNSIGNED @@ -101,6 +100,10 @@ #endif setf.sig f8=in0 + UNW(.save ar.lc, r3) + + UNW(.body) + mov r3=ar.lc // save ar.lc setf.sig f9=in1 ;; @@ -156,3 +159,4 @@ mov ar.lc=r3 // restore ar.lc mov pr=r18,0xffffffffffff0000 // restore p16-p63 br.ret.sptk.few rp +END(NAME) diff -urN linux-2.4.0-test1/arch/ia64/lib/io.c linux-2.4.0-test1-lia/arch/ia64/lib/io.c --- linux-2.4.0-test1/arch/ia64/lib/io.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/arch/ia64/lib/io.c Thu May 25 23:07:14 2000 @@ -0,0 +1,54 @@ +#include +#include + +#include + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void +__ia64_memcpy_fromio (void * to, unsigned long from, long count) +{ + while (count) { + count--; + *(char *) to = readb(from); + ((char *) to)++; + from++; + } +} + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void +__ia64_memcpy_toio (unsigned long to, void * from, long count) +{ + while (count) { + count--; + writeb(*(char *) from, to); + ((char *) from)++; + to++; + } +} + +/* + * "memset" on IO memory space. + * This needs to be optimized. 
+ */ +void +__ia64_memset_c_io (unsigned long dst, unsigned long c, long count) +{ + unsigned char ch = (char)(c & 0xff); + + while (count) { + count--; + writeb(ch, dst); + dst++; + } +} + +EXPORT_SYMBOL(__ia64_memcpy_fromio); +EXPORT_SYMBOL(__ia64_memcpy_toio); +EXPORT_SYMBOL(__ia64_memset_c_io); diff -urN linux-2.4.0-test1/arch/ia64/lib/memset.S linux-2.4.0-test1-lia/arch/ia64/lib/memset.S --- linux-2.4.0-test1/arch/ia64/lib/memset.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/memset.S Thu May 25 23:07:21 2000 @@ -14,6 +14,7 @@ * Copyright (C) 1999 Stephane Eranian */ +#include // arguments // @@ -28,22 +29,23 @@ #define cnt r18 #define buf2 r19 #define saved_lc r20 -#define saved_pr r21 -#define tmp r22 +#define tmp r21 .text .psr abi64 .psr lsb - .align 16 - .global memset - .proc memset - -memset: +GLOBAL_ENTRY(memset) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,3,0,0,0 // cnt is sink here cmp.eq p8,p0=r0,len // check for zero length + UNW(.save ar.lc, saved_lc) mov saved_lc=ar.lc // preserve ar.lc (slow) ;; + + UNW(.body) + adds tmp=-1,len // br.ctop is repeat/until tbit.nz p6,p0=buf,0 // odd alignment (p8) br.ret.spnt.few rp @@ -108,4 +110,4 @@ ;; (p6) st1 [buf]=val // only 1 byte left br.ret.dptk.few rp - .endp +END(memset) diff -urN linux-2.4.0-test1/arch/ia64/lib/strlen.S linux-2.4.0-test1-lia/arch/ia64/lib/strlen.S --- linux-2.4.0-test1/arch/ia64/lib/strlen.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/strlen.S Thu May 25 23:07:27 2000 @@ -16,6 +16,8 @@ * 09/24/99 S.Eranian add speculation recovery code */ +#include + // // // This is an enhanced version of the basic strlen. 
it includes a combination @@ -82,10 +84,9 @@ .psr lsb .lsb - .align 32 - .global strlen - .proc strlen -strlen: +GLOBAL_ENTRY(strlen) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8 .rotr v[2], w[2] // declares our 4 aliases @@ -93,8 +94,12 @@ extr.u tmp=in0,0,3 // tmp=least significant 3 bits mov orig=in0 // keep trackof initial byte address dep src=0,in0,0,3 // src=8byte-aligned in0 address + UNW(.save pr, saved_pr) mov saved_pr=pr // preserve predicates (rotation) ;; + + UNW(.body) + ld8 v[1]=[src],8 // must not speculate: can fail here shl tmp=tmp,3 // multiply by 8bits/byte mov mask=-1 // our mask @@ -194,5 +199,4 @@ sub ret0=ret0,tmp // length=now - back -1 mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.few rp // end of sucessful recovery code - - .endp strlen +END(strlen) diff -urN linux-2.4.0-test1/arch/ia64/lib/strlen_user.S linux-2.4.0-test1-lia/arch/ia64/lib/strlen_user.S --- linux-2.4.0-test1/arch/ia64/lib/strlen_user.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/strlen_user.S Thu May 25 23:07:33 2000 @@ -15,6 +15,8 @@ * 09/24/99 S.Eranian added speculation recovery code */ +#include + // // int strlen_user(char *) // ------------------------ @@ -93,10 +95,9 @@ .psr lsb .lsb - .align 32 - .global __strlen_user - .proc __strlen_user -__strlen_user: +GLOBAL_ENTRY(__strlen_user) + UNW(.prologue) + UNW(.save ar.pfs, saved_pfs) alloc saved_pfs=ar.pfs,11,0,0,8 .rotr v[2], w[2] // declares our 4 aliases @@ -104,8 +105,12 @@ extr.u tmp=in0,0,3 // tmp=least significant 3 bits mov orig=in0 // keep trackof initial byte address dep src=0,in0,0,3 // src=8byte-aligned in0 address + UNW(.save pr, saved_pr) mov saved_pr=pr // preserve predicates (rotation) ;; + + .body + ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate) shl tmp=tmp,3 // multiply by 8bits/byte mov mask=-1 // our mask @@ -209,5 +214,4 @@ mov pr=saved_pr,0xffffffffffff0000 mov 
ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.few rp - - .endp __strlen_user +END(__strlen_user) diff -urN linux-2.4.0-test1/arch/ia64/lib/strncpy_from_user.S linux-2.4.0-test1-lia/arch/ia64/lib/strncpy_from_user.S --- linux-2.4.0-test1/arch/ia64/lib/strncpy_from_user.S Fri Mar 10 15:24:02 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/strncpy_from_user.S Thu May 25 23:07:45 2000 @@ -16,6 +16,8 @@ * by Andreas Schwab ). */ +#include + #define EX(x...) \ 99: x; \ .section __ex_table,"a"; \ @@ -28,10 +30,7 @@ .psr lsb .lsb - .align 32 - .global __strncpy_from_user - .proc __strncpy_from_user -__strncpy_from_user: +GLOBAL_ENTRY(__strncpy_from_user) alloc r2=ar.pfs,3,0,0,0 mov r8=0 mov r9=in1 @@ -53,5 +52,4 @@ .Lexit: br.ret.sptk.few rp - - .endp __strncpy_from_user +END(__strncpy_from_user) diff -urN linux-2.4.0-test1/arch/ia64/lib/strnlen_user.S linux-2.4.0-test1-lia/arch/ia64/lib/strnlen_user.S --- linux-2.4.0-test1/arch/ia64/lib/strnlen_user.S Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/arch/ia64/lib/strnlen_user.S Thu May 25 23:07:55 2000 @@ -12,6 +12,8 @@ * Copyright (C) 1999 David Mosberger-Tang */ +#include + /* If a fault occurs, r8 gets set to -EFAULT and r9 gets cleared. */ #define EX(x...) \ .section __ex_table,"a"; \ @@ -25,12 +27,14 @@ .psr lsb .lsb - .align 32 - .global __strnlen_user - .proc __strnlen_user -__strnlen_user: +GLOBAL_ENTRY(__strnlen_user) + UNW(.prologue) alloc r2=ar.pfs,2,0,0,0 + UNW(.save ar.lc, r16) mov r16=ar.lc // preserve ar.lc + + UNW(.body) + add r3=-1,in1 ;; mov ar.lc=r3 @@ -51,5 +55,4 @@ mov r8=r9 mov ar.lc=r16 // restore ar.lc br.ret.sptk.few rp - - .endp __strnlen_user +END(__strnlen_user) diff -urN linux-2.4.0-test1/arch/ia64/mm/fault.c linux-2.4.0-test1-lia/arch/ia64/mm/fault.c --- linux-2.4.0-test1/arch/ia64/mm/fault.c Mon Apr 24 15:52:23 2000 +++ linux-2.4.0-test1-lia/arch/ia64/mm/fault.c Thu Jun 1 01:09:50 2000 @@ -1,8 +1,8 @@ /* * MMU fault handling support. 
* - * Copyright (C) 1998, 1999 Hewlett-Packard Co - * Copyright (C) 1998, 1999 David Mosberger-Tang + * Copyright (C) 1998-2000 Hewlett-Packard Co + * Copyright (C) 1998-2000 David Mosberger-Tang */ #include #include @@ -94,7 +94,14 @@ * sure we exit gracefully rather than endlessly redo the * fault. */ - if (!handle_mm_fault(mm, vma, address, (isr & IA64_ISR_W) != 0)) { + switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) { + case 1: + ++current->min_flt; + break; + case 2: + ++current->maj_flt; + break; + case 0: /* * We ran out of memory, or some other thing happened * to us that made us unable to handle the page fault @@ -102,6 +109,8 @@ */ signal = SIGBUS; goto bad_area; + default: + goto out_of_memory; } up(&mm->mmap_sem); return; @@ -128,15 +137,11 @@ return; } if (user_mode(regs)) { -#if 0 -printk("%s(%d): segfault accessing %lx\n", current->comm, current->pid, address); -show_regs(regs); -#endif si.si_signo = signal; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_addr = (void *) address; - force_sig_info(SIGSEGV, &si, current); + force_sig_info(signal, &si, current); return; } @@ -161,4 +166,11 @@ die_if_kernel("Oops", regs, isr); do_exit(SIGKILL); return; + + out_of_memory: + up(&mm->mmap_sem); + printk("VM: killing process %s\n", current->comm); + if (user_mode(regs)) + do_exit(SIGKILL); + goto no_context; } diff -urN linux-2.4.0-test1/arch/ia64/mm/init.c linux-2.4.0-test1-lia/arch/ia64/mm/init.c --- linux-2.4.0-test1/arch/ia64/mm/init.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/mm/init.c Fri Jun 9 17:19:34 2000 @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -182,6 +183,19 @@ } void +free_initrd_mem(unsigned long start, unsigned long end) +{ + if (start < end) + printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); + for (; start < end; start += PAGE_SIZE) { + clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags); + set_page_count(&mem_map[MAP_NR(start)], 1); + 
free_page(start); + ++totalram_pages; + } +} + +void si_meminfo (struct sysinfo *val) { val->totalram = totalram_pages; @@ -265,7 +279,7 @@ void __init ia64_rid_init (void) { - unsigned long flags, rid, pta; + unsigned long flags, rid, pta, impl_va_msb; /* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */ ia64_clear_ic(flags); @@ -300,11 +314,15 @@ # define ld_max_addr_space_size (ld_max_addr_space_pages + PAGE_SHIFT) # define ld_max_vpt_size (ld_max_addr_space_pages + ld_pte_size) # define POW2(n) (1ULL << (n)) -# define IMPL_VA_MSB 50 - if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(IMPL_VA_MSB)) + impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1; + + if (impl_va_msb < 50 || impl_va_msb > 60) + panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb); + + if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb)) panic("mm/init: overlap between virtually mapped linear page table and " "mapped kernel space!"); - pta = POW2(61) - POW2(IMPL_VA_MSB); + pta = POW2(61) - POW2(impl_va_msb); /* * Set the (virtually mapped linear) page table address. 
Bit * 8 selects between the short and long format, bits 2-7 the @@ -314,54 +332,6 @@ ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 1); } -#ifdef CONFIG_IA64_VIRTUAL_MEM_MAP - -static int -create_mem_map_page_table (u64 start, u64 end, void *arg) -{ - unsigned long address, start_page, end_page; - struct page *map_start, *map_end; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - void *page; - - map_start = mem_map + MAP_NR(start); - map_end = mem_map + MAP_NR(end); - - start_page = (unsigned long) map_start & PAGE_MASK; - end_page = PAGE_ALIGN((unsigned long) map_end); - - printk("[%lx,%lx) -> %lx-%lx\n", start, end, start_page, end_page); - - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = pgd_offset_k(address); - if (pgd_none(*pgd)) { - pmd = alloc_bootmem_pages(PAGE_SIZE); - clear_page(pmd); - pgd_set(pgd, pmd); - pmd += (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); - } else - pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) { - pte = alloc_bootmem_pages(PAGE_SIZE); - clear_page(pte); - pmd_set(pmd, pte); - pte += (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); - } else - pte = pte_offset(pmd, address); - - if (pte_none(*pte)) { - page = alloc_bootmem_pages(PAGE_SIZE); - clear_page(page); - set_pte(pte, mk_pte_phys(__pa(page), PAGE_KERNEL)); - } - } - return 0; -} - -#endif /* CONFIG_IA64_VIRTUAL_MEM_MAP */ - /* * Set up the page tables. 
*/ @@ -372,9 +342,6 @@ clear_page((void *) ZERO_PAGE_ADDR); - ia64_rid_init(); - __flush_tlb_all(); - /* initialize mem_map[] */ memset(zones_size, 0, sizeof(zones_size)); @@ -426,8 +393,6 @@ max_mapnr = max_low_pfn; high_memory = __va(max_low_pfn * PAGE_SIZE); - - ia64_tlb_init(); totalram_pages += free_all_bootmem(); diff -urN linux-2.4.0-test1/arch/ia64/mm/tlb.c linux-2.4.0-test1-lia/arch/ia64/mm/tlb.c --- linux-2.4.0-test1/arch/ia64/mm/tlb.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/mm/tlb.c Thu Jun 1 01:10:24 2000 @@ -42,6 +42,69 @@ */ spinlock_t ptcg_lock = SPIN_LOCK_UNLOCKED; /* see */ +#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG) + +#include + +unsigned long flush_end, flush_start, flush_nbits, flush_rid; +atomic_t flush_cpu_count; + +/* + * flush_tlb_no_ptcg is called with ptcg_lock locked + */ +static inline void +flush_tlb_no_ptcg (unsigned long start, unsigned long end, unsigned long nbits) +{ + extern void smp_send_flush_tlb (void); + unsigned long saved_tpr = 0; + unsigned long flags; + + /* + * Some times this is called with interrupts disabled and causes + * dead-lock; to avoid this we enable interrupt and raise the TPR + * to enable ONLY IPI. + */ + __save_flags(flags); + if (!(flags & IA64_PSR_I)) { + saved_tpr = ia64_get_tpr(); + ia64_srlz_d(); + ia64_set_tpr(IPI_IRQ - 16); + ia64_srlz_d(); + local_irq_enable(); + } + + flush_rid = ia64_get_rr(start); + ia64_srlz_d(); + flush_start = start; + flush_end = end; + flush_nbits = nbits; + atomic_set(&flush_cpu_count, smp_num_cpus - 1); + smp_send_flush_tlb(); + /* + * Purge local TLB entries. ALAT invalidation is done in ia64_leave_kernel. + */ + do { + asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory"); + start += (1UL << nbits); + } while (start < end); + + ia64_srlz_i(); /* srlz.i implies srlz.d */ + + /* + * Wait for other CPUs to finish purging entries. 
+ */ + while (atomic_read(&flush_cpu_count)) { + /* Nothing */ + } + if (!(flags & IA64_PSR_I)) { + local_irq_disable(); + ia64_set_tpr(saved_tpr); + ia64_srlz_d(); + } +} + +#endif /* CONFIG_SMP && !CONFIG_ITANIUM_PTCG */ + void get_new_mmu_context (struct mm_struct *mm) { @@ -97,7 +160,7 @@ stride0 = ia64_ptce_info.stride[0]; stride1 = ia64_ptce_info.stride[1]; - __save_and_cli(flags); + local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { asm volatile ("ptc.e %0" :: "r"(addr)); @@ -105,7 +168,7 @@ } addr += stride0; } - __restore_flags(flags); + local_irq_restore(flags); ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_insn_group_barrier(); @@ -143,22 +206,28 @@ start &= ~((1UL << nbits) - 1); spin_lock(&ptcg_lock); - do { -#ifdef CONFIG_SMP - __asm__ __volatile__ ("ptc.g %0,%1;;srlz.i;;" - :: "r"(start), "r"(nbits<<2) : "memory"); +#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG) + flush_tlb_no_ptcg(start, end, nbits); #else - __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory"); -#endif + do { +# ifdef CONFIG_SMP + /* + * Flush ALAT entries also. 
+ */ + asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory"); +# else + asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory"); +# endif start += (1UL << nbits); } while (start < end); +#endif /* CONFIG_SMP && !defined(CONFIG_ITANIUM_PTCG) */ spin_unlock(&ptcg_lock); ia64_insn_group_barrier(); ia64_srlz_i(); /* srlz.i implies srlz.d */ ia64_insn_group_barrier(); } -void +void __init ia64_tlb_init (void) { ia64_get_ptce(&ia64_ptce_info); diff -urN linux-2.4.0-test1/arch/ia64/tools/Makefile linux-2.4.0-test1-lia/arch/ia64/tools/Makefile --- linux-2.4.0-test1/arch/ia64/tools/Makefile Fri May 12 11:32:08 2000 +++ linux-2.4.0-test1-lia/arch/ia64/tools/Makefile Fri May 26 20:38:12 2000 @@ -44,4 +44,4 @@ endif -.PHONY: all +.PHONY: all modules diff -urN linux-2.4.0-test1/arch/ia64/tools/print_offsets.c linux-2.4.0-test1-lia/arch/ia64/tools/print_offsets.c --- linux-2.4.0-test1/arch/ia64/tools/print_offsets.c Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/tools/print_offsets.c Thu May 25 23:08:14 2000 @@ -58,11 +58,95 @@ { "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) }, { "IA64_TASK_MM_OFFSET", offsetof (struct task_struct, mm) }, { "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) }, + { "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) }, + { "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) }, + { "IA64_PT_REGS_AR_UNAT_OFFSET", offsetof (struct pt_regs, ar_unat) }, + { "IA64_PT_REGS_AR_PFS_OFFSET", offsetof (struct pt_regs, ar_pfs) }, + { "IA64_PT_REGS_AR_RSC_OFFSET", offsetof (struct pt_regs, ar_rsc) }, + { "IA64_PT_REGS_AR_RNAT_OFFSET", offsetof (struct pt_regs, ar_rnat) }, + { "IA64_PT_REGS_AR_BSPSTORE_OFFSET",offsetof (struct pt_regs, ar_bspstore) }, + { "IA64_PT_REGS_PR_OFFSET", offsetof (struct pt_regs, pr) }, + { "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) }, + { "IA64_PT_REGS_LOADRS_OFFSET", offsetof (struct pt_regs, loadrs) }, + { 
"IA64_PT_REGS_R1_OFFSET", offsetof (struct pt_regs, r1) }, + { "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) }, + { "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) }, { "IA64_PT_REGS_R12_OFFSET", offsetof (struct pt_regs, r12) }, + { "IA64_PT_REGS_R13_OFFSET", offsetof (struct pt_regs, r13) }, + { "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) }, + { "IA64_PT_REGS_R15_OFFSET", offsetof (struct pt_regs, r15) }, { "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) }, + { "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) }, + { "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) }, + { "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) }, { "IA64_PT_REGS_R16_OFFSET", offsetof (struct pt_regs, r16) }, - { "IA64_SWITCH_STACK_B0_OFFSET", offsetof (struct switch_stack, b0) }, - { "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) }, + { "IA64_PT_REGS_R17_OFFSET", offsetof (struct pt_regs, r17) }, + { "IA64_PT_REGS_R18_OFFSET", offsetof (struct pt_regs, r18) }, + { "IA64_PT_REGS_R19_OFFSET", offsetof (struct pt_regs, r19) }, + { "IA64_PT_REGS_R20_OFFSET", offsetof (struct pt_regs, r20) }, + { "IA64_PT_REGS_R21_OFFSET", offsetof (struct pt_regs, r21) }, + { "IA64_PT_REGS_R22_OFFSET", offsetof (struct pt_regs, r22) }, + { "IA64_PT_REGS_R23_OFFSET", offsetof (struct pt_regs, r23) }, + { "IA64_PT_REGS_R24_OFFSET", offsetof (struct pt_regs, r24) }, + { "IA64_PT_REGS_R25_OFFSET", offsetof (struct pt_regs, r25) }, + { "IA64_PT_REGS_R26_OFFSET", offsetof (struct pt_regs, r26) }, + { "IA64_PT_REGS_R27_OFFSET", offsetof (struct pt_regs, r27) }, + { "IA64_PT_REGS_R28_OFFSET", offsetof (struct pt_regs, r28) }, + { "IA64_PT_REGS_R29_OFFSET", offsetof (struct pt_regs, r29) }, + { "IA64_PT_REGS_R30_OFFSET", offsetof (struct pt_regs, r30) }, + { "IA64_PT_REGS_R31_OFFSET", offsetof (struct pt_regs, r31) }, + { "IA64_PT_REGS_AR_CCV_OFFSET", offsetof (struct pt_regs, ar_ccv) }, + { 
"IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) }, + { "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) }, + { "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) }, + { "IA64_PT_REGS_F6_OFFSET", offsetof (struct pt_regs, f6) }, + { "IA64_PT_REGS_F7_OFFSET", offsetof (struct pt_regs, f7) }, + { "IA64_PT_REGS_F8_OFFSET", offsetof (struct pt_regs, f8) }, + { "IA64_PT_REGS_F9_OFFSET", offsetof (struct pt_regs, f9) }, + { "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) }, + { "IA64_SWITCH_STACK_AR_FPSR_OFFSET", offsetof (struct switch_stack, ar_fpsr) }, + { "IA64_SWITCH_STACK_F2_OFFSET", offsetof (struct switch_stack, f2) }, + { "IA64_SWITCH_STACK_F3_OFFSET", offsetof (struct switch_stack, f3) }, + { "IA64_SWITCH_STACK_F4_OFFSET", offsetof (struct switch_stack, f4) }, + { "IA64_SWITCH_STACK_F5_OFFSET", offsetof (struct switch_stack, f5) }, + { "IA64_SWITCH_STACK_F10_OFFSET", offsetof (struct switch_stack, f10) }, + { "IA64_SWITCH_STACK_F11_OFFSET", offsetof (struct switch_stack, f11) }, + { "IA64_SWITCH_STACK_F12_OFFSET", offsetof (struct switch_stack, f12) }, + { "IA64_SWITCH_STACK_F13_OFFSET", offsetof (struct switch_stack, f13) }, + { "IA64_SWITCH_STACK_F14_OFFSET", offsetof (struct switch_stack, f14) }, + { "IA64_SWITCH_STACK_F15_OFFSET", offsetof (struct switch_stack, f15) }, + { "IA64_SWITCH_STACK_F16_OFFSET", offsetof (struct switch_stack, f16) }, + { "IA64_SWITCH_STACK_F17_OFFSET", offsetof (struct switch_stack, f17) }, + { "IA64_SWITCH_STACK_F18_OFFSET", offsetof (struct switch_stack, f18) }, + { "IA64_SWITCH_STACK_F19_OFFSET", offsetof (struct switch_stack, f19) }, + { "IA64_SWITCH_STACK_F20_OFFSET", offsetof (struct switch_stack, f20) }, + { "IA64_SWITCH_STACK_F21_OFFSET", offsetof (struct switch_stack, f21) }, + { "IA64_SWITCH_STACK_F22_OFFSET", offsetof (struct switch_stack, f22) }, + { "IA64_SWITCH_STACK_F23_OFFSET", offsetof (struct switch_stack, f23) }, + { "IA64_SWITCH_STACK_F24_OFFSET", 
offsetof (struct switch_stack, f24) }, + { "IA64_SWITCH_STACK_F25_OFFSET", offsetof (struct switch_stack, f25) }, + { "IA64_SWITCH_STACK_F26_OFFSET", offsetof (struct switch_stack, f26) }, + { "IA64_SWITCH_STACK_F27_OFFSET", offsetof (struct switch_stack, f27) }, + { "IA64_SWITCH_STACK_F28_OFFSET", offsetof (struct switch_stack, f28) }, + { "IA64_SWITCH_STACK_F29_OFFSET", offsetof (struct switch_stack, f29) }, + { "IA64_SWITCH_STACK_F30_OFFSET", offsetof (struct switch_stack, f30) }, + { "IA64_SWITCH_STACK_F31_OFFSET", offsetof (struct switch_stack, f31) }, + { "IA64_SWITCH_STACK_R4_OFFSET", offsetof (struct switch_stack, r4) }, + { "IA64_SWITCH_STACK_R5_OFFSET", offsetof (struct switch_stack, r5) }, + { "IA64_SWITCH_STACK_R6_OFFSET", offsetof (struct switch_stack, r6) }, + { "IA64_SWITCH_STACK_R7_OFFSET", offsetof (struct switch_stack, r7) }, + { "IA64_SWITCH_STACK_B0_OFFSET", offsetof (struct switch_stack, b0) }, + { "IA64_SWITCH_STACK_B1_OFFSET", offsetof (struct switch_stack, b1) }, + { "IA64_SWITCH_STACK_B2_OFFSET", offsetof (struct switch_stack, b2) }, + { "IA64_SWITCH_STACK_B3_OFFSET", offsetof (struct switch_stack, b3) }, + { "IA64_SWITCH_STACK_B4_OFFSET", offsetof (struct switch_stack, b4) }, + { "IA64_SWITCH_STACK_B5_OFFSET", offsetof (struct switch_stack, b5) }, + { "IA64_SWITCH_STACK_AR_PFS_OFFSET", offsetof (struct switch_stack, ar_pfs) }, + { "IA64_SWITCH_STACK_AR_LC_OFFSET", offsetof (struct switch_stack, ar_lc) }, + { "IA64_SWITCH_STACK_AR_UNAT_OFFSET", offsetof (struct switch_stack, ar_unat) }, + { "IA64_SWITCH_STACK_AR_RNAT_OFFSET", offsetof (struct switch_stack, ar_rnat) }, + { "IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET", offsetof (struct switch_stack, ar_bspstore) }, + { "IA64_SWITCH_STACK_PR_OFFSET", offsetof (struct switch_stack, pr) }, { "IA64_SIGCONTEXT_AR_BSP_OFFSET", offsetof (struct sigcontext, sc_ar_bsp) }, { "IA64_SIGCONTEXT_AR_RNAT_OFFSET", offsetof (struct sigcontext, sc_ar_rnat) }, { "IA64_SIGCONTEXT_FLAGS_OFFSET", offsetof (struct 
sigcontext, sc_flags) }, diff -urN linux-2.4.0-test1/arch/ia64/vmlinux.lds.S linux-2.4.0-test1-lia/arch/ia64/vmlinux.lds.S --- linux-2.4.0-test1/arch/ia64/vmlinux.lds.S Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/arch/ia64/vmlinux.lds.S Fri Jun 9 17:20:07 2000 @@ -32,6 +32,13 @@ #endif _etext = .; + /* Read-only data */ + + __gp = ALIGN(8) + 0x200000; + + /* Global data */ + _data = .; + /* Exception table */ . = ALIGN(16); __start___ex_table = .; @@ -39,19 +46,33 @@ { *(__ex_table) } __stop___ex_table = .; - /* Kernel symbol names for modules: */ + __start___ksymtab = .; /* Kernel symbol table */ + __ksymtab : AT(ADDR(__ksymtab) - PAGE_OFFSET) + { *(__ksymtab) } + __stop___ksymtab = .; + + /* Unwind table */ + ia64_unw_start = .; + .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET) + { *(.IA_64.unwind) } + ia64_unw_end = .; + .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET) + { *(.IA_64.unwind_info) } + + .rodata : AT(ADDR(.rodata) - PAGE_OFFSET) + { *(.rodata) } .kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET) { *(.kstrtab) } + .opd : AT(ADDR(.opd) - PAGE_OFFSET) + { *(.opd) } - /* The initial task and kernel stack */ - . = ALIGN(PAGE_SIZE); - init_task : AT(ADDR(init_task) - PAGE_OFFSET) - { *(init_task) } + /* Initialization code and data: */ - /* Startup code */ + . = ALIGN(PAGE_SIZE); __init_begin = .; .text.init : AT(ADDR(.text.init) - PAGE_OFFSET) { *(.text.init) } + .data.init : AT(ADDR(.data.init) - PAGE_OFFSET) { *(.data.init) } . = ALIGN(16); @@ -66,6 +87,10 @@ . 
= ALIGN(PAGE_SIZE); __init_end = .; + /* The initial task and kernel stack */ + init_task : AT(ADDR(init_task) - PAGE_OFFSET) + { *(init_task) } + .data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET) { *(.data.idt) } @@ -73,17 +98,12 @@ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET) { *(.data.cacheline_aligned) } - /* Global data */ - _data = .; + /* Kernel symbol names for modules: */ + .kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET) + { *(.kstrtab) } - .rodata : AT(ADDR(.rodata) - PAGE_OFFSET) - { *(.rodata) } - .opd : AT(ADDR(.opd) - PAGE_OFFSET) - { *(.opd) } .data : AT(ADDR(.data) - PAGE_OFFSET) { *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS } - - __gp = ALIGN (8) + 0x200000; .got : AT(ADDR(.got) - PAGE_OFFSET) { *(.got.plt) *(.got) } diff -urN linux-2.4.0-test1/drivers/char/Config.in linux-2.4.0-test1-lia/drivers/char/Config.in --- linux-2.4.0-test1/drivers/char/Config.in Wed May 24 08:29:47 2000 +++ linux-2.4.0-test1-lia/drivers/char/Config.in Fri May 26 20:39:20 2000 @@ -16,6 +16,11 @@ tristate ' Dual serial port support' CONFIG_DUALSP_SERIAL fi fi + +if [ "$CONFIG_SERIAL" = "n" ]; then + bool 'Simulated serial driver support' CONFIG_SIM_SERIAL +fi + bool 'Extended dumb serial driver options' CONFIG_SERIAL_EXTENDED if [ "$CONFIG_SERIAL_EXTENDED" = "y" ]; then bool ' Support more than 4 serial ports' CONFIG_SERIAL_MANY_PORTS diff -urN linux-2.4.0-test1/drivers/char/Makefile linux-2.4.0-test1-lia/drivers/char/Makefile --- linux-2.4.0-test1/drivers/char/Makefile Sat May 20 12:07:56 2000 +++ linux-2.4.0-test1-lia/drivers/char/Makefile Fri May 26 20:38:45 2000 @@ -118,6 +118,7 @@ endif endif +obj-$(CONFIG_SIM_SERIAL) += simserial.o obj-$(CONFIG_ROCKETPORT) += rocket.o obj-$(CONFIG_MOXA_SMARTIO) += mxser.o obj-$(CONFIG_MOXA_INTELLIO) += moxa.o diff -urN linux-2.4.0-test1/drivers/char/serial.c linux-2.4.0-test1-lia/drivers/char/serial.c --- linux-2.4.0-test1/drivers/char/serial.c Mon May 8 22:00:41 2000 +++ 
linux-2.4.0-test1-lia/drivers/char/serial.c Fri May 26 20:37:44 2000 @@ -4881,9 +4881,6 @@ int cflag = CREAD | HUPCL | CLOCAL; int quot = 0; char *s; -#if defined(CONFIG_KDB) - extern int kdb_port; -#endif if (options) { baud = simple_strtoul(options, NULL, 10); @@ -4987,14 +4984,6 @@ if (serial_in(info, UART_LSR) == 0xff) return -1; -#if defined(CONFIG_KDB) - /* - * Remember I/O port for kdb - */ - if (kdb_port == 0 ) - kdb_port = ser->port; -#endif /* CONFIG_KDB */ - return 0; } diff -urN linux-2.4.0-test1/drivers/char/simserial.c linux-2.4.0-test1-lia/drivers/char/simserial.c --- linux-2.4.0-test1/drivers/char/simserial.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/drivers/char/simserial.c Tue May 23 17:12:58 2000 @@ -0,0 +1,1097 @@ +/* + * Simulated Serial Driver (fake serial) + * + * This driver is mostly used for bringup purposes and will go away. + * It has a strong dependency on the system console. All outputs + * are rerouted to the same facility as the one used by printk which, in our + * case means sys_sim.c console (goes via the simulator). The code hereafter + * is completely leveraged from the serial.c driver. + * + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999 Stephane Eranian + * Copyright (C) 2000 David Mosberger-Tang + * + * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). + * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_KDB +# include +#endif + +#include +#include + +#undef SIMSERIAL_DEBUG /* define this to get some debug information */ + +#define KEYBOARD_INTR 3 /* must match with simulator! */ +#define SIMSERIAL_IRQ 0xee + +#define NR_PORTS 1 /* only one port for now */ +#define SERIAL_INLINE 1 + +#ifdef SERIAL_INLINE +#define _INLINE_ inline +#endif + +#ifndef MIN +#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) +#endif + +#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) + +#define SSC_GETCHAR 21 + +extern long ia64_ssc (long, long, long, long, int); +extern void ia64_ssc_connect_irq (long intr, long irq); + +static char *serial_name = "SimSerial driver"; +static char *serial_version = "0.6"; + +/* + * This has been extracted from asm/serial.h. We need one eventually but + * I don't know exactly what we're going to put in it so just fake one + * for now. + */ +#define BASE_BAUD ( 1843200 / 16 ) + +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) + +/* + * Most of the values here are meaningless to this particular driver. + * However some values must be preserved for the code (leveraged from serial.c + * to work correctly). + * port must not be 0 + * type must not be UNKNOWN + * So I picked arbitrary (guess from where?) values instead + */ +static struct serial_state rs_table[NR_PORTS]={ + /* UART CLK PORT IRQ FLAGS */ + { 0, BASE_BAUD, 0x3F8, SIMSERIAL_IRQ, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */ +}; + +/* + * Just for the fun of it ! 
+ */ +static struct serial_uart_config uart_config[] = { + { "unknown", 1, 0 }, + { "8250", 1, 0 }, + { "16450", 1, 0 }, + { "16550", 1, 0 }, + { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, + { "cirrus", 1, 0 }, + { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, + { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | + UART_STARTECH }, + { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, + { 0, 0} +}; + +static struct tty_driver serial_driver, callout_driver; +static int serial_refcount; + +static struct async_struct *IRQ_ports[NR_IRQS]; +static struct tty_struct *serial_table[NR_PORTS]; +static struct termios *serial_termios[NR_PORTS]; +static struct termios *serial_termios_locked[NR_PORTS]; + +static struct console *console; + +static unsigned char *tmp_buf; +static DECLARE_MUTEX(tmp_buf_sem); + +extern struct console *console_drivers; /* from kernel/printk.c */ + +/* + * ------------------------------------------------------------ + * rs_stop() and rs_start() + * + * This routines are called before setting or resetting tty->stopped. + * They enable or disable transmitter interrupts, as necessary. 
+ * ------------------------------------------------------------ + */ +static void rs_stop(struct tty_struct *tty) +{ +#ifdef SIMSERIAL_DEBUG + printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", + tty->stopped, tty->hw_stopped, tty->flow_stopped); +#endif + +} + +static void rs_start(struct tty_struct *tty) +{ +#if SIMSERIAL_DEBUG + printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", + tty->stopped, tty->hw_stopped, tty->flow_stopped); +#endif +} + +static void receive_chars(struct tty_struct *tty) +{ + unsigned char ch; + static unsigned char seen_esc = 0; + + while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { + if ( ch == 27 && seen_esc == 0 ) { + seen_esc = 1; + continue; + } else { + if ( seen_esc==1 && ch == 'O' ) { + seen_esc = 2; + continue; + } else if ( seen_esc == 2 ) { + if ( ch == 'P' ) show_state(); /* F1 key */ + if ( ch == 'Q' ) show_buffers(); /* F2 key */ +#ifdef CONFIG_KDB + if ( ch == 'S' ) KDB_ENTER(); /* F4 key */ +#endif + seen_esc = 0; + continue; + } + } + seen_esc = 0; + if (tty->flip.count >= TTY_FLIPBUF_SIZE) break; + + *tty->flip.char_buf_ptr = ch; + + *tty->flip.flag_buf_ptr = 0; + + tty->flip.flag_buf_ptr++; + tty->flip.char_buf_ptr++; + tty->flip.count++; + } + tty_flip_buffer_push(tty); +} + +/* + * This is the serial driver's interrupt routine for a single port + */ +static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs) +{ + struct async_struct * info; + + /* + * I don't know exactly why they don't use the dev_id opaque data + * pointer instead of this extra lookup table + */ + info = IRQ_ports[irq]; + if (!info || !info->tty) { + printk("simrs_interrupt_single: info|tty=0 info=%p problem\n", info); + return; + } + /* + * pretty simple in our case, because we only get interrupts + * on inbound traffic + */ + receive_chars(info->tty); +} + +/* + * ------------------------------------------------------------------- + * Here ends the serial interrupt 
routines. + * ------------------------------------------------------------------- + */ + +#if 0 +/* + * not really used in our situation so keep them commented out for now + */ +static DECLARE_TASK_QUEUE(tq_serial); /* used to be at the top of the file */ +static void do_serial_bh(void) +{ + run_task_queue(&tq_serial); + printk("do_serial_bh: called\n"); +} +#endif + +static void do_softint(void *private_) +{ + printk("simserial: do_softint called\n"); +} + +static void rs_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + unsigned long flags; + + if (!tty || !info->xmit.buf) return; + + save_flags(flags); cli(); + if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { + restore_flags(flags); + return; + } + info->xmit.buf[info->xmit.head] = ch; + info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); + restore_flags(flags); +} + +static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) +{ + int count; + unsigned long flags; + + save_flags(flags); cli(); + + if (info->x_char) { + char c = info->x_char; + + console->write(console, &c, 1); + + info->state->icount.tx++; + info->x_char = 0; + + goto out; + } + + if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { +#ifdef SIMSERIAL_DEBUG + printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", + info->xmit.head, info->xmit.tail, info->tty->stopped); +#endif + goto out; + } + /* + * We removed the loop and try to do it in to chunks. We need + * 2 operations maximum because it's a ring buffer. + * + * First from current to tail if possible. 
+ * Then from the beginning of the buffer until necessary + */ + + count = MIN(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), + SERIAL_XMIT_SIZE - info->xmit.tail); + console->write(console, info->xmit.buf+info->xmit.tail, count); + + info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1); + + /* + * We have more at the beginning of the buffer + */ + count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); + if (count) { + console->write(console, info->xmit.buf, count); + info->xmit.tail += count; + } +out: + restore_flags(flags); +} + +static void rs_flush_chars(struct tty_struct *tty) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + + if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || + !info->xmit.buf) + return; + + transmit_chars(info, NULL); +} + + +static int rs_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) +{ + int c, ret = 0; + struct async_struct *info = (struct async_struct *)tty->driver_data; + unsigned long flags; + + if (!tty || !info->xmit.buf || !tmp_buf) return 0; + + save_flags(flags); + if (from_user) { + down(&tmp_buf_sem); + while (1) { + int c1; + c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); + if (count < c) + c = count; + if (c <= 0) + break; + + c -= copy_from_user(tmp_buf, buf, c); + if (!c) { + if (!ret) + ret = -EFAULT; + break; + } + cli(); + c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); + if (c1 < c) + c = c1; + memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c); + info->xmit.head = ((info->xmit.head + c) & + (SERIAL_XMIT_SIZE-1)); + restore_flags(flags); + buf += c; + count -= c; + ret += c; + } + up(&tmp_buf_sem); + } else { + cli(); + while (1) { + c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); + if (count < c) + c = count; + if (c <= 0) { + break; + } + memcpy(info->xmit.buf + info->xmit.head, buf, c); + 
info->xmit.head = ((info->xmit.head + c) & + (SERIAL_XMIT_SIZE-1)); + buf += c; + count -= c; + ret += c; + } + restore_flags(flags); + } + /* + * Hey, we transmit directly from here in our case + */ + if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) + && !tty->stopped && !tty->hw_stopped) { + transmit_chars(info, NULL); + } + return ret; +} + +static int rs_write_room(struct tty_struct *tty) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + + return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); +} + +static int rs_chars_in_buffer(struct tty_struct *tty) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + + return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); +} + +static void rs_flush_buffer(struct tty_struct *tty) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + unsigned long flags; + + save_flags(flags); cli(); + info->xmit.head = info->xmit.tail = 0; + restore_flags(flags); + + wake_up_interruptible(&tty->write_wait); + + if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && + tty->ldisc.write_wakeup) + (tty->ldisc.write_wakeup)(tty); +} + +/* + * This function is used to send a high-priority XON/XOFF character to + * the device + */ +static void rs_send_xchar(struct tty_struct *tty, char ch) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + + info->x_char = ch; + if (ch) { + /* + * I guess we could call console->write() directly but + * let's do that for now. + */ + transmit_chars(info, NULL); + } +} + +/* + * ------------------------------------------------------------ + * rs_throttle() + * + * This routine is called by the upper-layer tty layer to signal that + * incoming characters should be throttled. 
+ * ------------------------------------------------------------ + */ +static void rs_throttle(struct tty_struct * tty) +{ + if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty)); + + printk("simrs_throttle called\n"); +} + +static void rs_unthrottle(struct tty_struct * tty) +{ + struct async_struct *info = (struct async_struct *)tty->driver_data; + + if (I_IXOFF(tty)) { + if (info->x_char) + info->x_char = 0; + else + rs_send_xchar(tty, START_CHAR(tty)); + } + printk("simrs_unthrottle called\n"); +} + +/* + * rs_break() --- routine which turns the break handling on or off + */ +static void rs_break(struct tty_struct *tty, int break_state) +{ +} + +static int rs_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) +{ + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && + (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && + (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + } + + switch (cmd) { + case TIOCMGET: + printk("rs_ioctl: TIOCMGET called\n"); + return -EINVAL; + case TIOCMBIS: + case TIOCMBIC: + case TIOCMSET: + printk("rs_ioctl: TIOCMBIS/BIC/SET called\n"); + return -EINVAL; + case TIOCGSERIAL: + printk("simrs_ioctl TIOCGSERIAL called\n"); + return 0; + case TIOCSSERIAL: + printk("simrs_ioctl TIOCSSERIAL called\n"); + return 0; + case TIOCSERCONFIG: + printk("rs_ioctl: TIOCSERCONFIG called\n"); + return -EINVAL; + + case TIOCSERGETLSR: /* Get line status register */ + printk("rs_ioctl: TIOCSERGETLSR called\n"); + return -EINVAL; + + case TIOCSERGSTRUCT: + printk("rs_ioctl: TIOCSERGSTRUCT called\n"); +#if 0 + if (copy_to_user((struct async_struct *) arg, + info, sizeof(struct async_struct))) + return -EFAULT; +#endif + return 0; + + /* + * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change + * - mask passed in arg for lines of interest + * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) + * Caller should use TIOCGICOUNT to see which one it was + */ + case 
TIOCMIWAIT: + printk("rs_ioctl: TIOCMIWAIT: called\n"); + return 0; + /* + * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) + * Return: write counters to the user passed counter struct + * NB: both 1->0 and 0->1 transitions are counted except for + * RI where only 0->1 is counted. + */ + case TIOCGICOUNT: + printk("rs_ioctl: TIOCGICOUNT called\n"); + return 0; + + case TIOCSERGWILD: + case TIOCSERSWILD: + /* "setserial -W" is called in Debian boot */ + printk ("TIOCSER?WILD ioctl obsolete, ignored.\n"); + return 0; + + default: + return -ENOIOCTLCMD; + } + return 0; +} + +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) + +static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios) +{ + unsigned int cflag = tty->termios->c_cflag; + + if ( (cflag == old_termios->c_cflag) + && ( RELEVANT_IFLAG(tty->termios->c_iflag) + == RELEVANT_IFLAG(old_termios->c_iflag))) + return; + + + /* Handle turning off CRTSCTS */ + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) { + tty->hw_stopped = 0; + rs_start(tty); + } +} +/* + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. + */ +static void shutdown(struct async_struct * info) +{ + unsigned long flags; + struct serial_state *state; + int retval; + + if (!(info->flags & ASYNC_INITIALIZED)) return; + + state = info->state; + +#ifdef SIMSERIAL_DEBUG + printk("Shutting down serial port %d (irq %d)....", info->line, + state->irq); +#endif + + save_flags(flags); cli(); /* Disable interrupts */ + + /* + * First unlink the serial port from the IRQ chain... 
+ */ + if (info->next_port) + info->next_port->prev_port = info->prev_port; + if (info->prev_port) + info->prev_port->next_port = info->next_port; + else + IRQ_ports[state->irq] = info->next_port; + + /* + * Free the IRQ, if necessary + */ + if (state->irq && (!IRQ_ports[state->irq] || + !IRQ_ports[state->irq]->next_port)) { + if (IRQ_ports[state->irq]) { + free_irq(state->irq, NULL); + retval = request_irq(state->irq, rs_interrupt_single, + IRQ_T(info), "serial", NULL); + + if (retval) + printk("serial shutdown: request_irq: error %d" + " Couldn't reacquire IRQ.\n", retval); + } else + free_irq(state->irq, NULL); + } + + if (info->xmit.buf) { + free_page((unsigned long) info->xmit.buf); + info->xmit.buf = 0; + } + + if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); + + info->flags &= ~ASYNC_INITIALIZED; + restore_flags(flags); +} + +/* + * ------------------------------------------------------------ + * rs_close() + * + * This routine is called when the serial port gets closed. First, we + * wait for the last remaining data to be sent. Then, we unlink its + * async structure from the interrupt chain if necessary, and we free + * that IRQ if nothing is left in the chain. + * ------------------------------------------------------------ + */ +static void rs_close(struct tty_struct *tty, struct file * filp) +{ + struct async_struct * info = (struct async_struct *)tty->driver_data; + struct serial_state *state; + unsigned long flags; + + if (!info ) return; + + state = info->state; + + save_flags(flags); cli(); + + if (tty_hung_up_p(filp)) { +#ifdef SIMSERIAL_DEBUG + printk("rs_close: hung_up\n"); +#endif + MOD_DEC_USE_COUNT; + restore_flags(flags); + return; + } +#ifdef SIMSERIAL_DEBUG + printk("rs_close ttys%d, count = %d\n", info->line, state->count); +#endif + if ((tty->count == 1) && (state->count != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. state->count should always + * be one in these conditions. 
If it's greater than + * one, we've got real problems, since it means the + * serial port won't be shutdown. + */ + printk("rs_close: bad serial port count; tty->count is 1, " + "state->count is %d\n", state->count); + state->count = 1; + } + if (--state->count < 0) { + printk("rs_close: bad serial port count for ttys%d: %d\n", + info->line, state->count); + state->count = 0; + } + if (state->count) { + MOD_DEC_USE_COUNT; + restore_flags(flags); + return; + } + info->flags |= ASYNC_CLOSING; + restore_flags(flags); + + /* + * Now we wait for the transmit buffer to clear; and we notify + * the line discipline to only process XON/XOFF characters. + */ + shutdown(info); + if (tty->driver.flush_buffer) tty->driver.flush_buffer(tty); + if (tty->ldisc.flush_buffer) tty->ldisc.flush_buffer(tty); + info->event = 0; + info->tty = 0; + if (info->blocked_open) { + if (info->close_delay) { + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(info->close_delay); + } + wake_up_interruptible(&info->open_wait); + } + info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE|ASYNC_CLOSING); + wake_up_interruptible(&info->close_wait); + MOD_DEC_USE_COUNT; +} + +/* + * rs_wait_until_sent() --- wait until the transmitter is empty + */ +static void rs_wait_until_sent(struct tty_struct *tty, int timeout) +{ +} + + +/* + * rs_hangup() --- called by tty_hangup() when a hangup is signaled. 
+ */ +static void rs_hangup(struct tty_struct *tty) +{ + struct async_struct * info = (struct async_struct *)tty->driver_data; + struct serial_state *state = info->state; + +#ifdef SIMSERIAL_DEBUG + printk("rs_hangup: called\n"); +#endif + + state = info->state; + + rs_flush_buffer(tty); + if (info->flags & ASYNC_CLOSING) + return; + shutdown(info); + + info->event = 0; + state->count = 0; + info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE); + info->tty = 0; + wake_up_interruptible(&info->open_wait); +} + + +static int get_async_struct(int line, struct async_struct **ret_info) +{ + struct async_struct *info; + struct serial_state *sstate; + + sstate = rs_table + line; + sstate->count++; + if (sstate->info) { + *ret_info = sstate->info; + return 0; + } + info = kmalloc(sizeof(struct async_struct), GFP_KERNEL); + if (!info) { + sstate->count--; + return -ENOMEM; + } + memset(info, 0, sizeof(struct async_struct)); + init_waitqueue_head(&info->open_wait); + init_waitqueue_head(&info->close_wait); + init_waitqueue_head(&info->delta_msr_wait); + info->magic = SERIAL_MAGIC; + info->port = sstate->port; + info->flags = sstate->flags; + info->xmit_fifo_size = sstate->xmit_fifo_size; + info->line = line; + info->tqueue.routine = do_softint; + info->tqueue.data = info; + info->state = sstate; + if (sstate->info) { + kfree_s(info, sizeof(struct async_struct)); + *ret_info = sstate->info; + return 0; + } + *ret_info = sstate->info = info; + return 0; +} + +static int +startup(struct async_struct *info) +{ + unsigned long flags; + int retval=0; + void (*handler)(int, void *, struct pt_regs *); + struct serial_state *state= info->state; + unsigned long page; + + page = get_free_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + save_flags(flags); cli(); + + if (info->flags & ASYNC_INITIALIZED) { + free_page(page); + goto errout; + } + + if (!state->port || !state->type) { + if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); + free_page(page); + goto errout; + } + 
if (info->xmit.buf) + free_page(page); + else + info->xmit.buf = (unsigned char *) page; + +#ifdef SIMSERIAL_DEBUG + printk("startup: ttys%d (irq %d)...", info->line, state->irq); +#endif + + /* + * Allocate the IRQ if necessary + */ + if (state->irq && (!IRQ_ports[state->irq] || + !IRQ_ports[state->irq]->next_port)) { + if (IRQ_ports[state->irq]) { + retval = -EBUSY; + goto errout; + } else + handler = rs_interrupt_single; + + retval = request_irq(state->irq, handler, IRQ_T(info), + "simserial", NULL); + if (retval) { + if (capable(CAP_SYS_ADMIN)) { + if (info->tty) + set_bit(TTY_IO_ERROR, + &info->tty->flags); + retval = 0; + } + goto errout; + } + } + + /* + * Insert serial port into IRQ chain. + */ + info->prev_port = 0; + info->next_port = IRQ_ports[state->irq]; + if (info->next_port) + info->next_port->prev_port = info; + IRQ_ports[state->irq] = info; + + if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); + + info->xmit.head = info->xmit.tail = 0; + +#if 0 + /* + * Set up serial timers... + */ + timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; + timer_active |= 1 << RS_TIMER; +#endif + + /* + * Set up the tty->alt_speed kludge + */ + if (info->tty) { + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) + info->tty->alt_speed = 57600; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) + info->tty->alt_speed = 115200; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) + info->tty->alt_speed = 230400; + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) + info->tty->alt_speed = 460800; + } + + info->flags |= ASYNC_INITIALIZED; + restore_flags(flags); + return 0; + +errout: + restore_flags(flags); + return retval; +} + + +/* + * This routine is called whenever a serial port is opened. It + * enables interrupts for a serial port, linking in its async structure into + * the IRQ chain. It also performs the serial-specific + * initialization for the tty structure. 
+ */ +static int rs_open(struct tty_struct *tty, struct file * filp) +{ + struct async_struct *info; + int retval, line; + unsigned long page; + + MOD_INC_USE_COUNT; + line = MINOR(tty->device) - tty->driver.minor_start; + if ((line < 0) || (line >= NR_PORTS)) { + MOD_DEC_USE_COUNT; + return -ENODEV; + } + retval = get_async_struct(line, &info); + if (retval) { + MOD_DEC_USE_COUNT; + return retval; + } + tty->driver_data = info; + info->tty = tty; + +#ifdef SIMSERIAL_DEBUG + printk("rs_open %s%d, count = %d\n", tty->driver.name, info->line, + info->state->count); +#endif + info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; + + if (!tmp_buf) { + page = get_free_page(GFP_KERNEL); + if (!page) { + /* MOD_DEC_USE_COUNT; "info->tty" will cause this? */ + return -ENOMEM; + } + if (tmp_buf) + free_page(page); + else + tmp_buf = (unsigned char *) page; + } + + /* + * If the port is the middle of closing, bail out now + */ + if (tty_hung_up_p(filp) || + (info->flags & ASYNC_CLOSING)) { + if (info->flags & ASYNC_CLOSING) + interruptible_sleep_on(&info->close_wait); + /* MOD_DEC_USE_COUNT; "info->tty" will cause this? */ +#ifdef SERIAL_DO_RESTART + return ((info->flags & ASYNC_HUP_NOTIFY) ? + -EAGAIN : -ERESTARTSYS); +#else + return -EAGAIN; +#endif + } + + /* + * Start up serial port + */ + retval = startup(info); + if (retval) { + /* MOD_DEC_USE_COUNT; "info->tty" will cause this? 
*/ + return retval; + } + + if ((info->state->count == 1) && + (info->flags & ASYNC_SPLIT_TERMIOS)) { + if (tty->driver.subtype == SERIAL_TYPE_NORMAL) + *tty->termios = info->state->normal_termios; + else + *tty->termios = info->state->callout_termios; + } + + /* + * figure out which console to use (should be one already) + */ + console = console_drivers; + while (console) { + if ((console->flags & CON_ENABLED) && console->write) break; + console = console->next; + } + + info->session = current->session; + info->pgrp = current->pgrp; + +#ifdef SIMSERIAL_DEBUG + printk("rs_open ttys%d successful\n", info->line); +#endif + return 0; +} + +/* + * /proc fs routines.... + */ + +static inline int line_info(char *buf, struct serial_state *state) +{ + return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n", + state->line, uart_config[state->type].name, + state->port, state->irq); +} + +int rs_read_proc(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + int i, len = 0, l; + off_t begin = 0; + + len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version); + for (i = 0; i < NR_PORTS && len < 4000; i++) { + l = line_info(page + len, &rs_table[i]); + len += l; + if (len+begin > off+count) + goto done; + if (len+begin < off) { + begin += len; + len = 0; + } + } + *eof = 1; +done: + if (off >= len+begin) + return 0; + *start = page + (begin-off); + return ((count < begin+len-off) ? count : begin+len-off); +} + +/* + * --------------------------------------------------------------------- + * rs_init() and friends + * + * rs_init() is called at boot-time to initialize the serial driver. + * --------------------------------------------------------------------- + */ + +/* + * This routine prints out the appropriate serial driver version + * number, and identifies which options were configured into this + * driver. 
+ */ +static inline void show_serial_version(void) +{ + printk(KERN_INFO "%s version %s with", serial_name, serial_version); + printk(" no serial options enabled\n"); +} + +/* + * The serial driver boot-time initialization code! + */ +int __init +simrs_init (void) +{ + int i; + struct serial_state *state; + + show_serial_version(); + + /* connect the platform's keyboard interrupt to SIMSERIAL_IRQ */ + ia64_ssc_connect_irq(KEYBOARD_INTR, SIMSERIAL_IRQ); + + /* Initialize the tty_driver structure */ + + memset(&serial_driver, 0, sizeof(struct tty_driver)); + serial_driver.magic = TTY_DRIVER_MAGIC; + serial_driver.driver_name = "simserial"; + serial_driver.name = "ttyS"; + serial_driver.major = TTY_MAJOR; + serial_driver.minor_start = 64; + serial_driver.num = 1; + serial_driver.type = TTY_DRIVER_TYPE_SERIAL; + serial_driver.subtype = SERIAL_TYPE_NORMAL; + serial_driver.init_termios = tty_std_termios; + serial_driver.init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + serial_driver.flags = TTY_DRIVER_REAL_RAW; + serial_driver.refcount = &serial_refcount; + serial_driver.table = serial_table; + serial_driver.termios = serial_termios; + serial_driver.termios_locked = serial_termios_locked; + + serial_driver.open = rs_open; + serial_driver.close = rs_close; + serial_driver.write = rs_write; + serial_driver.put_char = rs_put_char; + serial_driver.flush_chars = rs_flush_chars; + serial_driver.write_room = rs_write_room; + serial_driver.chars_in_buffer = rs_chars_in_buffer; + serial_driver.flush_buffer = rs_flush_buffer; + serial_driver.ioctl = rs_ioctl; + serial_driver.throttle = rs_throttle; + serial_driver.unthrottle = rs_unthrottle; + serial_driver.send_xchar = rs_send_xchar; + serial_driver.set_termios = rs_set_termios; + serial_driver.stop = rs_stop; + serial_driver.start = rs_start; + serial_driver.hangup = rs_hangup; + serial_driver.break_ctl = rs_break; + serial_driver.wait_until_sent = rs_wait_until_sent; + serial_driver.read_proc = rs_read_proc; + + 
/* + * Let's have a little bit of fun ! + */ + for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) { + + if (state->type == PORT_UNKNOWN) continue; + + printk(KERN_INFO "ttyS%02d at 0x%04lx (irq = %d) is a %s\n", + state->line, + state->port, state->irq, + uart_config[state->type].name); + } + /* + * The callout device is just like normal device except for + * major number and the subtype code. + */ + callout_driver = serial_driver; + callout_driver.name = "cua"; + callout_driver.major = TTYAUX_MAJOR; + callout_driver.subtype = SERIAL_TYPE_CALLOUT; + callout_driver.read_proc = 0; + callout_driver.proc_entry = 0; + + if (tty_register_driver(&serial_driver)) + panic("Couldn't register simserial driver\n"); + + if (tty_register_driver(&callout_driver)) + panic("Couldn't register callout driver\n"); + + return 0; +} diff -urN linux-2.4.0-test1/drivers/char/tty_io.c linux-2.4.0-test1-lia/drivers/char/tty_io.c --- linux-2.4.0-test1/drivers/char/tty_io.c Sat May 20 12:07:56 2000 +++ linux-2.4.0-test1-lia/drivers/char/tty_io.c Fri May 26 20:38:45 2000 @@ -2298,6 +2298,9 @@ #ifdef CONFIG_ESPSERIAL /* init ESP before rs, so rs doesn't see the port */ espserial_init(); #endif +#ifdef CONFIG_SIM_SERIAL + simrs_init(); +#endif #if defined(CONFIG_MVME162_SCC) || defined(CONFIG_BVME6000_SCC) || defined(CONFIG_MVME147_SCC) vme_scc_init(); #endif diff -urN linux-2.4.0-test1/drivers/ide/piix.c linux-2.4.0-test1-lia/drivers/ide/piix.c --- linux-2.4.0-test1/drivers/ide/piix.c Wed May 24 08:40:41 2000 +++ linux-2.4.0-test1-lia/drivers/ide/piix.c Thu Jun 1 01:11:08 2000 @@ -424,9 +424,10 @@ void __init ide_init_piix (ide_hwif_t *hwif) { #ifndef CONFIG_IA64 + /* autoprobe instead... --davidm 00/04/20 */ if (!hwif->irq) hwif->irq = hwif->channel ? 
15 : 14; -#endif /* CONFIG_IA64 */ +#endif hwif->tuneproc = &piix_tune_drive; hwif->drives[0].autotune = 1; diff -urN linux-2.4.0-test1/drivers/net/Config.in linux-2.4.0-test1-lia/drivers/net/Config.in --- linux-2.4.0-test1/drivers/net/Config.in Sat May 20 12:05:30 2000 +++ linux-2.4.0-test1-lia/drivers/net/Config.in Fri May 26 20:38:46 2000 @@ -7,6 +7,7 @@ tristate 'Dummy net driver support' CONFIG_DUMMY tristate 'Bonding driver support' CONFIG_BONDING +tristate 'Simulated Ethernet ' CONFIG_SIMETH tristate 'EQL (serial line load balancing) support' CONFIG_EQUALIZER if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then if [ "$CONFIG_NETLINK" = "y" ]; then diff -urN linux-2.4.0-test1/drivers/net/Makefile linux-2.4.0-test1-lia/drivers/net/Makefile --- linux-2.4.0-test1/drivers/net/Makefile Sat May 20 12:05:30 2000 +++ linux-2.4.0-test1-lia/drivers/net/Makefile Fri May 26 20:38:46 2000 @@ -205,6 +205,7 @@ obj-$(CONFIG_ES3210) += es3210.o 8390.o obj-$(CONFIG_LNE390) += lne390.o 8390.o obj-$(CONFIG_NE3210) += ne3210.o 8390.o +obj-$(CONFIG_SIMETH) += simeth.o obj-$(CONFIG_PPP) += ppp_generic.o slhc.o obj-$(CONFIG_PPP_ASYNC) += ppp_async.o diff -urN linux-2.4.0-test1/drivers/net/Space.c linux-2.4.0-test1-lia/drivers/net/Space.c --- linux-2.4.0-test1/drivers/net/Space.c Mon May 15 12:00:34 2000 +++ linux-2.4.0-test1-lia/drivers/net/Space.c Fri May 26 20:38:46 2000 @@ -119,6 +119,9 @@ extern int apfddi_init(struct net_device *dev); extern int skfp_probe(struct net_device *dev); +/* Simulated Ethernet */ +extern int simeth_probe(struct net_device *); + /* Fibre Channel adapters */ extern int iph5526_probe(struct net_device *dev); @@ -354,6 +357,9 @@ #endif #ifdef CONFIG_HPLANCE /* HP300 internal Ethernet */ {hplance_probe, 0}, +#endif +#ifdef CONFIG_SIMETH + {simeth_probe, 0}, #endif #ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */ {mvme147lance_probe, 0}, diff -urN linux-2.4.0-test1/drivers/net/eepro100.c linux-2.4.0-test1-lia/drivers/net/eepro100.c --- 
linux-2.4.0-test1/drivers/net/eepro100.c Sat May 13 08:19:21 2000 +++ linux-2.4.0-test1-lia/drivers/net/eepro100.c Fri May 26 20:38:46 2000 @@ -50,9 +50,18 @@ static int txdmacount = 128; static int rxdmacount = 0; +#ifdef __ia64__ +/* + * Bug: this driver may generate unaligned accesses when not copying + * an incoming packet. Setting rx_copybreak to a large value force a + * copy and prevents unaligned accesses. + */ +static int rx_copybreak = 0x10000; +#else /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method. Lower values use more memory, but are faster. */ static int rx_copybreak = 200; +#endif /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 20; @@ -429,14 +438,14 @@ /* The Speedo3 Rx and Tx frame/buffer descriptors. */ struct descriptor { /* A generic descriptor. */ - s32 cmd_status; /* All command and status fields. */ + volatile s32 cmd_status; /* All command and status fields. */ u32 link; /* struct descriptor * */ unsigned char params[0]; }; /* The Speedo3 Rx and Tx buffer descriptors. */ struct RxFD { /* Receive frame descriptor. */ - s32 status; + volatile s32 status; u32 link; /* struct RxFD * */ u32 rx_buf_addr; /* void * */ u32 count; diff -urN linux-2.4.0-test1/drivers/net/simeth.c linux-2.4.0-test1-lia/drivers/net/simeth.c --- linux-2.4.0-test1/drivers/net/simeth.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/drivers/net/simeth.c Tue May 2 12:46:36 2000 @@ -0,0 +1,611 @@ +/* + * Simulated Ethernet Driver + * + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 Stephane Eranain + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SIMETH_IRQ 0xed +#define SIMETH_RECV_MAX 10 + +/* + * Maximum possible received frame for Ethernet. 
+ * We preallocate an sk_buff of that size to avoid costly + * memcpy for temporary buffer into sk_buff. We do basically + * what's done in other drivers, like eepro with a ring. + * The difference is, of course, that we don't have real DMA !!! + */ +#define SIMETH_FRAME_SIZE ETH_FRAME_LEN + + +#define SSC_NETDEV_PROBE 100 +#define SSC_NETDEV_SEND 101 +#define SSC_NETDEV_RECV 102 +#define SSC_NETDEV_ATTACH 103 +#define SSC_NETDEV_DETACH 104 + +#define NETWORK_INTR 8 + +/* + * This structure is need for the module version + * It hasn't been tested yet + */ +struct simeth_local { + struct net_device *next_module; + struct net_device_stats stats; + int simfd; /* descriptor in the simulator */ +}; + +static int simeth_probe1(struct net_device *dev); +static int simeth_open(struct net_device *dev); +static int simeth_close(struct net_device *dev); +static int simeth_tx(struct sk_buff *skb, struct net_device *dev); +static int simeth_rx(struct net_device *dev); +static struct net_device_stats *simeth_get_stats(struct net_device *dev); +static void simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs); +static void set_multicast_list(struct net_device *dev); +static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr); + +static char *simeth_version="v0.2"; + +/* + * This variable is used to establish a mapping between the Linux/ia64 kernel + * and the host linux kernel. + * + * As of today, we support only one card, even though most of the code + * is ready for many more. The mapping is then: + * linux/ia64 -> linux/x86 + * eth0 -> eth1 + * + * In the future, we some string operations, we could easily support up + * to 10 cards (0-9). + * + * The default mapping can be changed on the kernel command line by + * specifying simeth=ethX (or whatever string you want). 
+ */ +static char *simeth_device="eth0"; /* default host interface to use */ + + + +static volatile unsigned int card_count; /* how many cards "found" so far */ +static int simeth_debug=0; /* set to 1 to get debug information */ + +/* + * Used to catch IFF_UP & IFF_DOWN events + */ +static struct notifier_block simeth_dev_notifier = { + simeth_device_event, + 0 +}; + + +/* + * Function used when using a kernel command line option. + * + * Format: simeth=interface_name (like eth0) + */ +static int __init +simeth_setup(char *str) +{ + simeth_device = str; + return 1; +} + +__setup("simeth=", simeth_setup); + +/* + * Function used to probe for simeth devices when not installed + * as a loadable module + */ + +int __init +simeth_probe (struct net_device *dev) +{ + return simeth_probe1(dev); +} + +extern long ia64_ssc (long, long, long, long, int); +extern void ia64_ssc_connect_irq (long intr, long irq); + +static inline int +netdev_probe(char *name, unsigned char *ether) +{ + return ia64_ssc(__pa(name), __pa(ether), 0,0, SSC_NETDEV_PROBE); +} + + +static inline int +netdev_connect(int irq) +{ + /* XXX Fix me + * this does not support multiple cards + * also no return value + */ + ia64_ssc_connect_irq(NETWORK_INTR, irq); + return 0; +} + +static inline int +netdev_attach(int fd, int irq, unsigned int ipaddr) +{ + /* this puts the host interface in the right mode (start interupting) */ + return ia64_ssc(fd, ipaddr, 0,0, SSC_NETDEV_ATTACH); +} + + +static inline int +netdev_detach(int fd) +{ + /* + * inactivate the host interface (don't interrupt anymore) */ + return ia64_ssc(fd, 0,0,0, SSC_NETDEV_DETACH); +} + +static inline int +netdev_send(int fd, unsigned char *buf, unsigned int len) +{ + return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_SEND); +} + +static inline int +netdev_read(int fd, unsigned char *buf, unsigned int len) +{ + return ia64_ssc(fd, __pa(buf), len, 0, SSC_NETDEV_RECV); +} + +/* + * Function shared with module code, so cannot be in init section + * + 
* So far this function "detects" only one card (test_&_set) but could + * be extended easily. + * + * Return: + * - -ENODEV is no device found + * - -ENOMEM is no more memory + * - 0 otherwise + */ +static int +simeth_probe1(struct net_device *dev) +{ + struct simeth_local *local; + int dev_was_null = 0; + int fd, i; + + /* + * XXX Fix me + * let's support just one card for now + */ + if ( test_and_set_bit(0, &card_count) ) return -ENODEV; + + /* true when coming from init_module */ + if ( dev == NULL ) { + printk("simeth: dev was null\n"); + dev = init_etherdev(0,0); + if ( dev == NULL ) return -ENOMEM; + dev_was_null = 1; + } + + /* + * check with the simulator for the device + */ + fd = netdev_probe(simeth_device, dev->dev_addr); + if ( fd == -1 ) { + /* don't leak memory */ + if (dev_was_null) kfree(dev); + return -ENODEV; + } + + + /* + * XXX Fix me + * does not support more than one card ! + */ + dev->irq = SIMETH_IRQ; + + dev->priv = kmalloc(sizeof(struct simeth_local), GFP_KERNEL); + if (dev->priv == NULL) { + if ( dev_was_null ) kfree(dev); + return -ENOMEM; + } + + /* + * attach the interrupt in the simulator, this does enable interrupts + * until a netdev_attach() is called + */ + netdev_connect(dev->irq); + + memset(dev->priv, 0, sizeof(struct simeth_local)); + + local = dev->priv; + local->simfd = fd; /* keep track of underlying file descriptor */ + local->next_module = NULL; + + dev->open = simeth_open; + dev->stop = simeth_close; + dev->hard_start_xmit = simeth_tx; + dev->get_stats = simeth_get_stats; + dev->set_multicast_list = set_multicast_list; /* no yet used */ + + /* Fill in the fields of the device structure with ethernet-generic values. 
*/ + ether_setup(dev); + + printk("simeth: %s alpha\n", simeth_version); + printk("%s: hosteth=%s simfd=%d, HwAddr", dev->name, simeth_device, local->simfd); + for(i = 0; i < ETH_ALEN; i++) { + printk(" %2.2x", dev->dev_addr[i]); + } + printk(", IRQ %d\n", dev->irq); + +#ifdef MODULE + local->next_module = simeth_dev; + simeth_dev = dev; +#endif + /* + * XXX Fix me + * would not work with more than one device ! + */ + register_netdevice_notifier(&simeth_dev_notifier); + + return 0; +} + +/* + * actually binds the device to an interrupt vector + */ +static int +simeth_open(struct net_device *dev) +{ + if (request_irq(dev->irq, simeth_interrupt, 0, "simeth", dev)) { + printk ("simeth: unable to get IRQ %d.\n", dev->irq); + return -EAGAIN; + } + + netif_start_queue(dev); + MOD_INC_USE_COUNT; + + return 0; +} + +/* copied from lapbether.c */ +static __inline__ int dev_is_ethdev(struct net_device *dev) +{ + return ( dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5)); +} + + +/* + * Handler for IFF_UP or IFF_DOWN + * + * The reason for that is that we don't want to be interrupted when the + * interface is down. There is no way to unconnect in the simualtor. Instead + * we use this function to shutdown packet processing in the frame filter + * in the simulator. Thus no interrupts are generated + * + * + * That's also the place where we pass the IP address of this device to the + * simulator so that that we can start filtering packets for it + * + * There may be a better way of doing this, but I don't know which yet. + */ +static int +simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) +{ + struct net_device *dev = (struct net_device *)ptr; + struct simeth_local *local; + struct in_device *in_dev; + struct in_ifaddr **ifap = NULL; + struct in_ifaddr *ifa = NULL; + int r; + + + if ( ! 
dev ) { + printk(KERN_WARNING "simeth_device_event dev=0\n"); + return NOTIFY_DONE; + } + + if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE; + + /* + * Check whether or not it's for an ethernet device + * + * XXX Fixme: This works only as long as we support one + * type of ethernet device. + */ + if ( !dev_is_ethdev(dev) ) return NOTIFY_DONE; + + if ((in_dev=dev->ip_ptr) != NULL) { + for (ifap=&in_dev->ifa_list; (ifa=*ifap) != NULL; ifap=&ifa->ifa_next) + if (strcmp(dev->name, ifa->ifa_label) == 0) break; + } + if ( ifa == NULL ) { + printk("simeth_open: can't find device %s's ifa\n", dev->name); + return NOTIFY_DONE; + } + + printk("simeth_device_event: %s ipaddr=0x%x\n", dev->name, htonl(ifa->ifa_local)); + + /* + * XXX Fix me + * if the device was up, and we're simply reconfiguring it, not sure + * we get DOWN then UP. + */ + + local = dev->priv; + /* now do it for real */ + r = event == NETDEV_UP ? + netdev_attach(local->simfd, dev->irq, htonl(ifa->ifa_local)): + netdev_detach(local->simfd); + + printk("simeth: netdev_attach/detach: event=%s ->%d\n", event == NETDEV_UP ? "attach":"detach", r); + + return NOTIFY_DONE; +} + +static int +simeth_close(struct net_device *dev) +{ + netif_stop_queue(dev); + + free_irq(dev->irq, dev); + + MOD_DEC_USE_COUNT; + + return 0; +} + +/* + * Only used for debug + */ +static void +frame_print(unsigned char *from, unsigned char *frame, int len) +{ + int i; + + printk("%s: (%d) %02x", from, len, frame[0] & 0xff); + for(i=1; i < 6; i++ ) { + printk(":%02x", frame[i] &0xff); + } + printk(" %2x", frame[6] &0xff); + for(i=7; i < 12; i++ ) { + printk(":%02x", frame[i] &0xff); + } + printk(" [%02x%02x]\n", frame[12], frame[13]); + + for(i=14; i < len; i++ ) { + printk("%02x ", frame[i] &0xff); + if ( (i%10)==0) printk("\n"); + } + printk("\n"); +} + + +/* + * Function used to transmit of frame, very last one on the path before + * going to the simulator. 
+ */ +static int +simeth_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct simeth_local *local = (struct simeth_local *)dev->priv; + +#if 0 + /* ensure we have at least ETH_ZLEN bytes (min frame size) */ + unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + /* Where do the extra padding bytes comes from inthe skbuff ? */ +#else + /* the real driver in the host system is going to take care of that + * or maybe it's the NIC itself. + */ + unsigned int length = skb->len; +#endif + + local->stats.tx_bytes += skb->len; + local->stats.tx_packets++; + + + if (simeth_debug > 5) frame_print("simeth_tx", skb->data, length); + + netdev_send(local->simfd, skb->data, length); + + /* + * we are synchronous on write, so we don't simulate a + * trasnmit complete interrupt, thus we don't need to arm a tx + */ + + dev_kfree_skb(skb); + return 0; +} + +static inline struct sk_buff * +make_new_skb(struct net_device *dev) +{ + struct sk_buff *nskb; + + /* + * The +2 is used to make sure that the IP header is nicely + * aligned (on 4byte boundary I assume 14+2=16) + */ + nskb = dev_alloc_skb(SIMETH_FRAME_SIZE + 2); + if ( nskb == NULL ) { + printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name); + return NULL; + } + nskb->dev = dev; + + skb_reserve(nskb, 2); /* Align IP on 16 byte boundaries */ + + skb_put(nskb,SIMETH_FRAME_SIZE); + + return nskb; +} + +/* + * called from interrupt handler to process a received frame + */ +static int +simeth_rx(struct net_device *dev) +{ + struct simeth_local *local; + struct sk_buff *skb; + int len; + int rcv_count = SIMETH_RECV_MAX; + + local = (struct simeth_local *)dev->priv; + /* + * the loop concept has been borrowed from other drivers + * looks to me like it's a throttling thing to avoid pushing to many + * packets at one time into the stack. 
Making sure we can process them + * upstream and make forward progress overall + */ + do { + if ( (skb=make_new_skb(dev)) == NULL ) { + printk(KERN_NOTICE "%s: memory squeeze. dropping packet.\n", dev->name); + local->stats.rx_dropped++; + return 0; + } + /* + * Read only one frame at a time + */ + len = netdev_read(local->simfd, skb->data, SIMETH_FRAME_SIZE); + if ( len == 0 ) { + if ( simeth_debug > 0 ) printk(KERN_WARNING "%s: count=%d netdev_read=0\n", dev->name, SIMETH_RECV_MAX-rcv_count); + break; + } +#if 0 + /* + * XXX Fix me + * Should really do a csum+copy here + */ + memcpy(skb->data, frame, len); +#endif + skb->protocol = eth_type_trans(skb, dev); + + if ( simeth_debug > 6 ) frame_print("simeth_rx", skb->data, len); + + /* + * push the packet up & trigger software interrupt + */ + netif_rx(skb); + + local->stats.rx_packets++; + local->stats.rx_bytes += len; + + } while ( --rcv_count ); + + return len; /* 0 = nothing left to read, otherwise, we can try again */ +} + +/* + * Interrupt handler (Yes, we can do it too !!!) 
+ */ +static void +simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs) +{ + struct net_device *dev = dev_id; + + if ( dev == NULL ) { + printk(KERN_WARNING "simeth: irq %d for unknown device\n", irq); + return; + } + + /* + * very simple loop because we get interrupts only when receiving + */ + while (simeth_rx(dev)); +} + +static struct net_device_stats * +simeth_get_stats(struct net_device *dev) +{ + struct simeth_local *local = (struct simeth_local *) dev->priv; + + return &local->stats; +} + +/* fake multicast ability */ +static void +set_multicast_list(struct net_device *dev) +{ + printk(KERN_WARNING "%s: set_multicast_list called\n", dev->name); +} + +#ifdef CONFIG_NET_FASTROUTE +static int +simeth_accept_fastpath(struct net_device *dev, struct dst_entry *dst) +{ + printk(KERN_WARNING "%s: simeth_accept_fastpath called\n", dev->name); + return -1; +} +#endif + + +#ifdef MODULE + + +static int +simeth_init(void) +{ + unsigned int cards_found = 0; + + /* iterate over probe */ + + while ( simeth_probe1(NULL) == 0 ) cards_found++; + + return cards_found ? 
0 : -ENODEV; +} + + +int +init_module(void) +{ + simeth_dev = NULL; + + /* the register_netdev is done "indirectly by ether_initdev() */ + + return simeth_init(); +} + +void +cleanup_module(void) +{ + struct net_device *next; + + while ( simeth_dev ) { + + next = ((struct simeth_private *)simeth_dev->priv)->next_module; + + unregister_netdev(simeth_dev); + + kfree(simeth_dev); + + simeth_dev = next; + } + /* + * XXX fix me + * not clean wihen multiple devices + */ + unregister_netdevice_notifier(&simeth_dev_notifier); +} +#endif /* MODULE */ diff -urN linux-2.4.0-test1/drivers/scsi/Makefile linux-2.4.0-test1-lia/drivers/scsi/Makefile --- linux-2.4.0-test1/drivers/scsi/Makefile Mon Mar 27 09:44:58 2000 +++ linux-2.4.0-test1-lia/drivers/scsi/Makefile Tue May 2 12:46:36 2000 @@ -653,6 +653,10 @@ endif endif +ifeq ($(CONFIG_SCSI_SIM),y) +L_OBJS += simscsi.o +endif + ifeq ($(CONFIG_SCSI_EATA),y) L_OBJS += eata.o else diff -urN linux-2.4.0-test1/drivers/scsi/hosts.c linux-2.4.0-test1-lia/drivers/scsi/hosts.c --- linux-2.4.0-test1/drivers/scsi/hosts.c Mon Mar 27 09:48:11 2000 +++ linux-2.4.0-test1-lia/drivers/scsi/hosts.c Tue May 2 12:46:36 2000 @@ -321,6 +321,10 @@ #include "scsi_debug.h" #endif +#ifdef CONFIG_SCSI_SIM +#include "simscsi.h" +#endif + #ifdef CONFIG_SCSI_ACORNSCSI_3 #include "../acorn/scsi/acornscsi.h" #endif @@ -625,6 +629,9 @@ #endif #ifdef CONFIG_SCSI_PLUTO PLUTO, +#endif +#ifdef CONFIG_SCSI_SIM + SIMSCSI, #endif #ifdef CONFIG_ARCH_ACORN #ifdef CONFIG_SCSI_ACORNSCSI_3 diff -urN linux-2.4.0-test1/drivers/scsi/simscsi.c linux-2.4.0-test1-lia/drivers/scsi/simscsi.c --- linux-2.4.0-test1/drivers/scsi/simscsi.c Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/drivers/scsi/simscsi.c Thu Jun 1 01:11:28 2000 @@ -0,0 +1,359 @@ +/* + * Simulated SCSI driver. 
+ * + * Copyright (C) 1999 Hewlett-Packard Co + * Copyright (C) 1999 David Mosberger-Tang + * Copyright (C) 1999 Stephane Eranian + * + * 99/12/18 David Mosberger Added support for READ10/WRITE10 needed by linux v2.3.33 + */ +#include +#include +#include +#include +#include + +#include + +#include + +#include "scsi.h" +#include "sd.h" +#include "hosts.h" +#include "simscsi.h" + +#define DEBUG_SIMSCSI 0 + +/* Simulator system calls: */ + +#define SSC_OPEN 50 +#define SSC_CLOSE 51 +#define SSC_READ 52 +#define SSC_WRITE 53 +#define SSC_GET_COMPLETION 54 +#define SSC_WAIT_COMPLETION 55 + +#define SSC_WRITE_ACCESS 2 +#define SSC_READ_ACCESS 1 + +struct timer_list disk_timer; + +struct disk_req { + unsigned long addr; + unsigned len; +}; + +struct disk_stat { + int fd; + unsigned count; +}; + +extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); + +static int desc[8] = { -1, -1, -1, -1, -1, -1, -1, -1 }; + +static struct queue_entry { + Scsi_Cmnd *sc; +} queue[SIMSCSI_REQ_QUEUE_LEN]; + +static int rd, wr; +static atomic_t num_reqs = ATOMIC_INIT(0); + +/* base name for default disks */ +static char *simscsi_root = DEFAULT_SIMSCSI_ROOT; + +#define MAX_ROOT_LEN 128 + +/* + * used to setup a new base for disk images + * to use /foo/bar/disk[a-z] as disk images + * you have to specify simscsi=/foo/bar/disk on the command line + */ +static int __init +simscsi_setup (char *s) +{ + /* XXX Fix me we may need to strcpy() ? 
*/ + if (strlen(s) > MAX_ROOT_LEN) { + printk("simscsi_setup: prefix too long---using default %s\n", simscsi_root); + } + simscsi_root = s; + return 1; +} + +__setup("simscsi=", simscsi_setup); + +static void +simscsi_interrupt (unsigned long val) +{ + unsigned long flags; + Scsi_Cmnd *sc; + + spin_lock_irqsave(&io_request_lock, flags); + { + while ((sc = queue[rd].sc) != 0) { + atomic_dec(&num_reqs); + queue[rd].sc = 0; +#if DEBUG_SIMSCSI + printk("simscsi_interrupt: done with %ld\n", sc->serial_number); +#endif + (*sc->scsi_done)(sc); + rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN; + } + } + spin_unlock_irqrestore(&io_request_lock, flags); +} + +int +simscsi_detect (Scsi_Host_Template *templ) +{ + templ->proc_name = "simscsi"; + init_timer(&disk_timer); + disk_timer.function = simscsi_interrupt; + return 1; /* fake one SCSI host adapter */ +} + +int +simscsi_release (struct Scsi_Host *host) +{ + return 0; /* this is easy... */ +} + +const char * +simscsi_info (struct Scsi_Host *host) +{ + return "simulated SCSI host adapter"; +} + +int +simscsi_abort (Scsi_Cmnd *cmd) +{ + printk ("simscsi_abort: unimplemented\n"); + return SCSI_ABORT_SUCCESS; +} + +int +simscsi_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) +{ + printk ("simscsi_reset: unimplemented\n"); + return SCSI_RESET_SUCCESS; +} + +int +simscsi_biosparam (Disk *disk, kdev_t n, int ip[]) +{ + int size = disk->capacity; + + ip[0] = 64; + ip[1] = 32; + ip[2] = size >> 11; + return 0; +} + +static void +simscsi_readwrite (Scsi_Cmnd *sc, int mode, unsigned long offset, unsigned long len) +{ + struct disk_stat stat; + struct disk_req req; + + req.addr = __pa(sc->request_buffer); + req.len = len; /* # of bytes to transfer */ + + if (sc->request_bufflen < req.len) + return; + + stat.fd = desc[sc->target]; +#if DEBUG_SIMSCSI + printk("simscsi_%s @ %lx (off %lx)\n", + mode == SSC_READ ? 
"read":"write", req.addr, offset); +#endif + ia64_ssc(stat.fd, 1, __pa(&req), offset, mode); + ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION); + + if (stat.count == req.len) { + sc->result = GOOD; + } else { + sc->result = DID_ERROR << 16; + } +} + +static void +simscsi_sg_readwrite (Scsi_Cmnd *sc, int mode, unsigned long offset) +{ + int list_len = sc->use_sg; + struct scatterlist *sl = (struct scatterlist *)sc->buffer; + struct disk_stat stat; + struct disk_req req; + + stat.fd = desc[sc->target]; + + while (list_len) { + req.addr = __pa(sl->address); + req.len = sl->length; +#if DEBUG_SIMSCSI + printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n", + mode == SSC_READ ? "read":"write", req.addr, offset, list_len, sl->length); +#endif + ia64_ssc(stat.fd, 1, __pa(&req), offset, mode); + ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION); + + /* should not happen in our case */ + if (stat.count != req.len) { + sc->result = DID_ERROR << 16; + return; + } + offset += sl->length; + sl++; + list_len--; + } + sc->result = GOOD; +} + +/* + * function handling both READ_6/WRITE_6 (non-scatter/gather mode) + * commands. 
+ * Added 02/26/99 S.Eranian + */ +static void +simscsi_readwrite6 (Scsi_Cmnd *sc, int mode) +{ + unsigned long offset; + + offset = (((sc->cmnd[1] & 0x1f) << 16) | (sc->cmnd[2] << 8) | sc->cmnd[3])*512; + if (sc->use_sg > 0) + simscsi_sg_readwrite(sc, mode, offset); + else + simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512); +} + + +static void +simscsi_readwrite10 (Scsi_Cmnd *sc, int mode) +{ + unsigned long offset; + + offset = ( (sc->cmnd[2] << 24) | (sc->cmnd[3] << 16) + | (sc->cmnd[4] << 8) | (sc->cmnd[5] << 0))*512; + if (sc->use_sg > 0) + simscsi_sg_readwrite(sc, mode, offset); + else + simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512); +} + +int +simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *)) +{ + char fname[MAX_ROOT_LEN+16]; + char *buf; + +#if DEBUG_SIMSCSI + register long sp asm ("sp"); + printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%lu,sp=%lx,done=%p\n", + sc->target, sc->cmnd[0], sc->serial_number, sp, done); +#endif + + sc->result = DID_BAD_TARGET << 16; + sc->scsi_done = done; + if (sc->target <= 7 && sc->lun == 0) { + switch (sc->cmnd[0]) { + case INQUIRY: + if (sc->request_bufflen < 35) { + break; + } + sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target); + desc[sc->target] = ia64_ssc (__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS, + 0, 0, SSC_OPEN); + if (desc[sc->target] < 0) { + /* disk doesn't exist... 
*/ + break; + } + buf = sc->request_buffer; + buf[0] = 0; /* magnetic disk */ + buf[1] = 0; /* not a removable medium */ + buf[2] = 2; /* SCSI-2 compliant device */ + buf[3] = 2; /* SCSI-2 response data format */ + buf[4] = 31; /* additional length (bytes) */ + buf[5] = 0; /* reserved */ + buf[6] = 0; /* reserved */ + buf[7] = 0; /* various flags */ + memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28); + sc->result = GOOD; + break; + + case TEST_UNIT_READY: + sc->result = GOOD; + break; + + case READ_6: + if (desc[sc->target] < 0 ) + break; + simscsi_readwrite6(sc, SSC_READ); + break; + + case READ_10: + if (desc[sc->target] < 0 ) + break; + simscsi_readwrite10(sc, SSC_READ); + break; + + case WRITE_6: + if (desc[sc->target] < 0) + break; + simscsi_readwrite6(sc, SSC_WRITE); + break; + + case WRITE_10: + if (desc[sc->target] < 0) + break; + simscsi_readwrite10(sc, SSC_WRITE); + break; + + + case READ_CAPACITY: + if (desc[sc->target] < 0 || sc->request_bufflen < 8) { + break; + } + buf = sc->request_buffer; + + /* pretend to be a 1GB disk (partition table contains real stuff): */ + buf[0] = 0x00; + buf[1] = 0x1f; + buf[2] = 0xff; + buf[3] = 0xff; + /* set block size of 512 bytes: */ + buf[4] = 0; + buf[5] = 0; + buf[6] = 2; + buf[7] = 0; + sc->result = GOOD; + break; + + case MODE_SENSE: + printk("MODE_SENSE\n"); + break; + + case START_STOP: + printk("START_STOP\n"); + break; + + default: + panic("simscsi: unknown SCSI command %u\n", sc->cmnd[0]); + } + } + if (sc->result == DID_BAD_TARGET) { + sc->result |= DRIVER_SENSE << 24; + sc->sense_buffer[0] = 0x70; + sc->sense_buffer[2] = 0x00; + } + if (atomic_read(&num_reqs) >= SIMSCSI_REQ_QUEUE_LEN) { + panic("Attempt to queue command while command is pending!!"); + } + atomic_inc(&num_reqs); + queue[wr].sc = sc; + wr = (wr + 1) % SIMSCSI_REQ_QUEUE_LEN; + + if (!timer_pending(&disk_timer)) { + disk_timer.expires = jiffies + HZ/20; + add_timer(&disk_timer); + } + return 0; +} diff -urN linux-2.4.0-test1/drivers/scsi/simscsi.h 
linux-2.4.0-test1-lia/drivers/scsi/simscsi.h --- linux-2.4.0-test1/drivers/scsi/simscsi.h Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/drivers/scsi/simscsi.h Tue May 2 12:46:36 2000 @@ -0,0 +1,39 @@ +/* + * Simulated SCSI driver. + * + * Copyright (C) 1999 Hewlett-Packard Co + * Copyright (C) 1999 David Mosberger-Tang + */ +#ifndef SIMSCSI_H +#define SIMSCSI_H + +#define SIMSCSI_REQ_QUEUE_LEN 64 + +#define DEFAULT_SIMSCSI_ROOT "/lia/sd" + +extern int simscsi_detect (Scsi_Host_Template *); +extern int simscsi_release (struct Scsi_Host *); +extern const char *simscsi_info (struct Scsi_Host *); +extern int simscsi_queuecommand (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +extern int simscsi_abort (Scsi_Cmnd *); +extern int simscsi_reset (Scsi_Cmnd *, unsigned int); +extern int simscsi_biosparam (Disk *, kdev_t, int[]); + +#define SIMSCSI { \ + detect: simscsi_detect, \ + release: simscsi_release, \ + info: simscsi_info, \ + queuecommand: simscsi_queuecommand, \ + abort: simscsi_abort, \ + reset: simscsi_reset, \ + bios_param: simscsi_biosparam, \ + can_queue: SIMSCSI_REQ_QUEUE_LEN, \ + this_id: -1, \ + sg_tablesize: 32, \ + cmd_per_lun: SIMSCSI_REQ_QUEUE_LEN, \ + present: 0, \ + unchecked_isa_dma: 0, \ + use_clustering: DISABLE_CLUSTERING \ +} + +#endif /* SIMSCSI_H */ diff -urN linux-2.4.0-test1/drivers/usb/devices.c linux-2.4.0-test1-lia/drivers/usb/devices.c --- linux-2.4.0-test1/drivers/usb/devices.c Wed Apr 26 15:22:55 2000 +++ linux-2.4.0-test1-lia/drivers/usb/devices.c Tue May 2 12:46:36 2000 @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include diff -urN linux-2.4.0-test1/drivers/usb/ibmcam.c linux-2.4.0-test1-lia/drivers/usb/ibmcam.c --- linux-2.4.0-test1/drivers/usb/ibmcam.c Wed May 24 17:52:43 2000 +++ linux-2.4.0-test1-lia/drivers/usb/ibmcam.c Fri May 26 20:39:20 2000 @@ -62,7 +62,7 @@ #define FLAGS_SEPARATE_FRAMES (1 << 5) #define FLAGS_CLEAN_FRAMES (1 << 6) -static int flags = 0; /* FLAGS_DISPLAY_HINTS | 
FLAGS_OVERLAY_STATS; */ +static int flags = FLAGS_RETRY_VIDIOCSYNC; /* FLAGS_DISPLAY_HINTS | FLAGS_OVERLAY_STATS; */ /* This is the size of V4L frame that we provide */ static const int imgwidth = V4L_FRAME_WIDTH_USED; diff -urN linux-2.4.0-test1/drivers/usb/uhci.c linux-2.4.0-test1-lia/drivers/usb/uhci.c --- linux-2.4.0-test1/drivers/usb/uhci.c Sat May 20 11:39:27 2000 +++ linux-2.4.0-test1-lia/drivers/usb/uhci.c Fri May 26 20:38:47 2000 @@ -33,7 +33,7 @@ #include #include #include -#define DEBUG +#undef DEBUG #include #include @@ -67,6 +67,46 @@ /* If a transfer is still active after this much time, turn off FSBR */ #define IDLE_TIMEOUT (HZ / 20) /* 50 ms */ +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + +static struct uhci *guhci; + +void +disable_usb (void) +{ + unsigned short cmd; + unsigned int io_addr; + + if (guhci == NULL) + return; + + io_addr = guhci->io_addr; + + cmd = inw (io_addr + USBCMD); + + outw(cmd & ~ USBCMD_RS, io_addr+USBCMD); + + while ((inw (io_addr + USBSTS) & USBSTS_HCH) == 0); +} + +void +reenable_usb (void) +{ + unsigned int io_addr; + unsigned short cmd; + + if (guhci == NULL) + return; + + io_addr = guhci->io_addr; + + cmd = inw (io_addr + USBCMD); + + outw(cmd | USBCMD_RS, io_addr+USBCMD); +} + +#endif /* CONFIG_ITANIUM_A1_SPECIFIC */ + /* * Only the USB core should call uhci_alloc_dev and uhci_free_dev */ @@ -2303,6 +2343,11 @@ if (!uhci_start_root_hub(uhci)) { struct pm_dev *pmdev; +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + guhci = uhci; + printk("%s: enabling Lion USB workaround io_addr=%x\n", + __FILE__, guhci->io_addr); +#endif pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(dev), handle_pm_event); diff -urN linux-2.4.0-test1/drivers/usb/usb-debug.c linux-2.4.0-test1-lia/drivers/usb/usb-debug.c --- linux-2.4.0-test1/drivers/usb/usb-debug.c Thu Mar 30 18:00:01 2000 +++ linux-2.4.0-test1-lia/drivers/usb/usb-debug.c Tue May 2 12:46:36 2000 @@ -10,9 +10,9 @@ #include #include #ifdef CONFIG_USB_DEBUG - #define DEBUG + #define DEBUG #else - #undef DEBUG + 
#undef DEBUG #endif #include diff -urN linux-2.4.0-test1/fs/binfmt_elf.c linux-2.4.0-test1-lia/fs/binfmt_elf.c --- linux-2.4.0-test1/fs/binfmt_elf.c Tue May 9 16:18:27 2000 +++ linux-2.4.0-test1-lia/fs/binfmt_elf.c Thu Jun 1 01:12:21 2000 @@ -161,7 +161,11 @@ NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff); NEW_AUX_ENT(1, AT_PHENT, sizeof (struct elf_phdr)); NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum); +#ifdef CONFIG_BINFMT_ELF32 + NEW_AUX_ENT(3, AT_PAGESZ, 4096); +#else // CONFIG_BINFMT_ELF32 NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE); +#endif // CONFIG_BINFMT_ELF32 NEW_AUX_ENT(4, AT_BASE, interp_load_addr); NEW_AUX_ENT(5, AT_FLAGS, 0); NEW_AUX_ENT(6, AT_ENTRY, load_bias + exec->e_entry); @@ -198,6 +202,19 @@ return sp; } +#ifdef CONFIG_BINFMT_ELF32 +static unsigned long +ia32_mm_addr(unsigned long addr) +{ + struct vm_area_struct *vma; + + if ((vma = find_vma(current->mm, addr)) == NULL) + return(ELF_PAGESTART(addr)); + if (vma->vm_start > addr) + return(ELF_PAGESTART(addr)); + return(ELF_PAGEALIGN(addr)); +} +#endif // CONFIG_BINFMT_ELF32 /* This is much more generalized than the library routine read function, so we keep this separate. 
Technically the library read function @@ -261,6 +278,16 @@ if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) elf_type |= MAP_FIXED; +#ifdef CONFIG_BINFMT_ELF32 + set_brk(ia32_mm_addr(vaddr + load_addr), vaddr + load_addr + eppnt->p_memsz); + map_addr = vaddr + load_addr; + if (eppnt->p_memsz < (1UL<<32) && map_addr <= (1UL<<32) - eppnt->p_memsz) { + memset((char *) map_addr + eppnt->p_filesz, 0, + eppnt->p_memsz - eppnt->p_filesz); + kernel_read(interpreter, eppnt->p_offset, (char *)map_addr, eppnt->p_filesz); + } else + map_addr = -EINVAL; +#else /* !CONFIG_BINFMT_ELF32 */ down(¤t->mm->mmap_sem); map_addr = do_mmap(interpreter, load_addr + ELF_PAGESTART(vaddr), @@ -269,8 +296,7 @@ elf_type, eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr)); up(¤t->mm->mmap_sem); - if (map_addr > -1024UL) /* Real error */ - goto out_close; +#endif /* !CONFIG_BINFMT_ELF32 */ if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) { load_addr = map_addr - ELF_PAGESTART(vaddr); @@ -475,6 +501,20 @@ if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) ibcs2_interpreter = 1; +#if defined(__ia64__) && !defined(CONFIG_BINFMT_ELF32) + /* + * XXX temporary gross hack until all IA-64 Linux binaries + * use /lib/ld-linux-ia64.so.1 as the linker name. 
+ */ +#define INTRP64 "/lib/ld-linux-ia64.so.1" + if (strcmp(elf_interpreter,"/lib/ld-linux.so.2") == 0) { + kfree(elf_interpreter); + elf_interpreter=(char *)kmalloc(sizeof(INTRP64), GFP_KERNEL); + if (!elf_interpreter) + goto out_free_file; + strcpy(elf_interpreter, INTRP64); + } +#endif /* defined(__ia64__) && !defined(CONFIG_BINFMT_ELF32) */ #if 0 printk("Using ELF interpreter %s\n", elf_interpreter); #endif @@ -614,6 +654,17 @@ elf_flags |= MAP_FIXED; } +#ifdef CONFIG_BINFMT_ELF32 + set_brk(ia32_mm_addr(vaddr + load_bias), vaddr + load_bias + elf_ppnt->p_memsz); + error = vaddr + load_bias; + if (elf_ppnt->p_memsz < (1UL<<32) && error <= (1UL<<32) - elf_ppnt->p_memsz) { + memset((char *) error + elf_ppnt->p_filesz, 0, + elf_ppnt->p_memsz - elf_ppnt->p_filesz); + kernel_read(bprm->file, elf_ppnt->p_offset, (char *) error, + elf_ppnt->p_filesz); + } else + error = EINVAL; +#else /* CONFIG_BINFMT_ELF32 */ down(¤t->mm->mmap_sem); error = do_mmap(bprm->file, ELF_PAGESTART(load_bias + vaddr), (elf_ppnt->p_filesz + @@ -621,6 +672,7 @@ elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); up(¤t->mm->mmap_sem); +#endif /* CONFIG_BINFMT_ELF32 */ if (!load_addr_set) { load_addr_set = 1; diff -urN linux-2.4.0-test1/fs/filesystems.c linux-2.4.0-test1-lia/fs/filesystems.c --- linux-2.4.0-test1/fs/filesystems.c Fri May 12 11:21:20 2000 +++ linux-2.4.0-test1-lia/fs/filesystems.c Fri May 26 20:38:13 2000 @@ -52,7 +52,7 @@ #ifdef CONFIG_NFSD_MODULE int (*do_nfsservctl)(int, void *, void *); #endif -int +long asmlinkage sys_nfsservctl(int cmd, void *argp, void *resp) { #ifndef CONFIG_NFSD_MODULE diff -urN linux-2.4.0-test1/fs/lockd/xdr.c linux-2.4.0-test1-lia/fs/lockd/xdr.c --- linux-2.4.0-test1/fs/lockd/xdr.c Sun Apr 2 15:31:32 2000 +++ linux-2.4.0-test1-lia/fs/lockd/xdr.c Thu Jun 1 01:12:30 2000 @@ -86,7 +86,7 @@ if ((len = ntohl(*p++)) != NFS2_FHSIZE) { printk(KERN_NOTICE - "lockd: bad fhandle size %x (should be %Zu)\n", + "lockd: bad fhandle size %x 
(should be %u)\n", len, NFS2_FHSIZE); return NULL; } diff -urN linux-2.4.0-test1/fs/nfsd/nfscache.c linux-2.4.0-test1-lia/fs/nfsd/nfscache.c --- linux-2.4.0-test1/fs/nfsd/nfscache.c Wed May 24 17:49:31 2000 +++ linux-2.4.0-test1-lia/fs/nfsd/nfscache.c Thu Jun 1 01:12:41 2000 @@ -60,7 +60,7 @@ nfscache = (struct svc_cacherep *) __get_free_pages(GFP_KERNEL, order); if (!nfscache) { - printk (KERN_ERR "nfsd: cannot allocate %d bytes for reply cache\n", i); + printk (KERN_ERR "nfsd: cannot allocate %Zu bytes for reply cache\n", i); return; } memset(nfscache, 0, i); @@ -70,7 +70,7 @@ if (!hash_list) { free_pages ((unsigned long)nfscache, order); nfscache = NULL; - printk (KERN_ERR "nfsd: cannot allocate %d bytes for hash list\n", i); + printk (KERN_ERR "nfsd: cannot allocate %Zu bytes for hash list\n", i); return; } diff -urN linux-2.4.0-test1/fs/nfsd/nfsctl.c linux-2.4.0-test1-lia/fs/nfsd/nfsctl.c --- linux-2.4.0-test1/fs/nfsd/nfsctl.c Mon May 8 15:54:17 2000 +++ linux-2.4.0-test1-lia/fs/nfsd/nfsctl.c Fri May 26 20:38:01 2000 @@ -218,7 +218,7 @@ }; #define CMD_MAX (sizeof(sizes)/sizeof(sizes[0])-1) -int +long asmlinkage handle_sys_nfsservctl(int cmd, void *opaque_argp, void *opaque_resp) { struct nfsctl_arg * argp = opaque_argp; diff -urN linux-2.4.0-test1/fs/nfsd/nfsfh.c linux-2.4.0-test1-lia/fs/nfsd/nfsfh.c --- linux-2.4.0-test1/fs/nfsd/nfsfh.c Thu May 25 09:32:48 2000 +++ linux-2.4.0-test1-lia/fs/nfsd/nfsfh.c Fri May 26 20:39:21 2000 @@ -367,7 +367,7 @@ /* It's a directory, or we are required to confirm the file's * location in the tree. 
*/ - dprintk("nfs_fh: need to look harder for %d/%ld\n",sb->s_dev,ino); + dprintk("nfs_fh: need to look harder for %d/%ld\n",sb->s_dev,(long) ino); down(&sb->s_nfsd_free_path_sem); /* claiming the semaphore might have allowed things to get fixed up */ diff -urN linux-2.4.0-test1/fs/proc/generic.c linux-2.4.0-test1-lia/fs/proc/generic.c --- linux-2.4.0-test1/fs/proc/generic.c Sun May 21 20:34:37 2000 +++ linux-2.4.0-test1-lia/fs/proc/generic.c Fri Jun 9 17:24:08 2000 @@ -42,7 +42,7 @@ #endif /* 4K page size but our output routines use some slack for overruns */ -#define PROC_BLOCK_SIZE (3*1024) +#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) static ssize_t proc_file_read(struct file * file, char * buf, size_t nbytes, loff_t *ppos) diff -urN linux-2.4.0-test1/fs/readdir.c linux-2.4.0-test1-lia/fs/readdir.c --- linux-2.4.0-test1/fs/readdir.c Sat May 20 10:55:28 2000 +++ linux-2.4.0-test1-lia/fs/readdir.c Thu Jun 1 01:30:33 2000 @@ -90,6 +90,8 @@ #define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de))) #define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1)) +#ifndef __ia64__ + struct old_linux_dirent { unsigned long d_ino; unsigned long d_offset; @@ -143,6 +145,8 @@ out: return error; } + +#endif /* !__ia64__ */ /* * New, all-improved, singing, dancing, iBCS2-compliant getdents() diff -urN linux-2.4.0-test1/include/asm-ia64/asmmacro.h linux-2.4.0-test1-lia/include/asm-ia64/asmmacro.h --- linux-2.4.0-test1/include/asm-ia64/asmmacro.h Wed Dec 31 16:00:00 1969 +++ linux-2.4.0-test1-lia/include/asm-ia64/asmmacro.h Thu May 25 23:51:04 2000 @@ -0,0 +1,48 @@ +#ifndef _ASM_IA64_ASMMACRO_H +#define _ASM_IA64_ASMMACRO_H + +/* + * Copyright (C) 2000 Hewlett-Packard Co + * Copyright (C) 2000 David Mosberger-Tang + */ + +#if 1 + +/* + * This is a hack that's necessary as long as we support old versions + * of gas, that have no unwind support. + */ +#include + +#ifdef CONFIG_IA64_NEW_UNWIND +# define UNW(args...) args +#else +# define UNW(args...) 
+#endif + +#endif + +#define ENTRY(name) \ + .align 16; \ + .proc name; \ +name: + +#define GLOBAL_ENTRY(name) \ + .global name; \ + ENTRY(name) + +#define END(name) \ + .endp name + +/* + * Helper macros to make unwind directives more readable: + */ + +/* prologue_gr: */ +#define ASM_UNW_PRLG_RP 0x8 +#define ASM_UNW_PRLG_PFS 0x4 +#define ASM_UNW_PRLG_PSP 0x2 +#define ASM_UNW_PRLG_PR 0x1 +#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs)) + +#endif /* _ASM_IA64_ASMMACRO_H */ diff -urN linux-2.4.0-test1/include/asm-ia64/ia32.h linux-2.4.0-test1-lia/include/asm-ia64/ia32.h --- linux-2.4.0-test1/include/asm-ia64/ia32.h Wed May 24 18:38:26 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/ia32.h Fri May 26 20:39:21 2000 @@ -276,7 +276,8 @@ #define ELF_PLATFORM 0 #ifdef __KERNEL__ -#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) +# define SET_PERSONALITY(EX,IBCS2) \ + (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX) #endif #define IA32_EFLAG 0x200 diff -urN linux-2.4.0-test1/include/asm-ia64/iosapic.h linux-2.4.0-test1-lia/include/asm-ia64/iosapic.h --- linux-2.4.0-test1/include/asm-ia64/iosapic.h Tue Feb 8 12:01:59 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/iosapic.h Thu May 25 23:10:29 2000 @@ -92,7 +92,7 @@ * } */ extern unsigned int iosapic_version(unsigned long); -extern void iosapic_init(unsigned long); +extern void iosapic_init(unsigned long, int); struct iosapic_vector { unsigned long iosapic_base; /* IOSAPIC Base address */ diff -urN linux-2.4.0-test1/include/asm-ia64/offsets.h linux-2.4.0-test1-lia/include/asm-ia64/offsets.h --- linux-2.4.0-test1/include/asm-ia64/offsets.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/offsets.h Thu May 25 23:10:59 2000 @@ -25,11 +25,95 @@ #define IA64_TASK_PID_OFFSET 188 /* 0xbc */ #define IA64_TASK_MM_OFFSET 88 /* 0x58 */ #define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */ +#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */ +#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 
*/ +#define IA64_PT_REGS_AR_UNAT_OFFSET 24 /* 0x18 */ +#define IA64_PT_REGS_AR_PFS_OFFSET 32 /* 0x20 */ +#define IA64_PT_REGS_AR_RSC_OFFSET 40 /* 0x28 */ +#define IA64_PT_REGS_AR_RNAT_OFFSET 48 /* 0x30 */ +#define IA64_PT_REGS_AR_BSPSTORE_OFFSET 56 /* 0x38 */ +#define IA64_PT_REGS_PR_OFFSET 64 /* 0x40 */ +#define IA64_PT_REGS_B6_OFFSET 72 /* 0x48 */ +#define IA64_PT_REGS_LOADRS_OFFSET 80 /* 0x50 */ +#define IA64_PT_REGS_R1_OFFSET 88 /* 0x58 */ +#define IA64_PT_REGS_R2_OFFSET 96 /* 0x60 */ +#define IA64_PT_REGS_R3_OFFSET 104 /* 0x68 */ #define IA64_PT_REGS_R12_OFFSET 112 /* 0x70 */ +#define IA64_PT_REGS_R13_OFFSET 120 /* 0x78 */ +#define IA64_PT_REGS_R14_OFFSET 128 /* 0x80 */ +#define IA64_PT_REGS_R15_OFFSET 136 /* 0x88 */ #define IA64_PT_REGS_R8_OFFSET 144 /* 0x90 */ +#define IA64_PT_REGS_R9_OFFSET 152 /* 0x98 */ +#define IA64_PT_REGS_R10_OFFSET 160 /* 0xa0 */ +#define IA64_PT_REGS_R11_OFFSET 168 /* 0xa8 */ #define IA64_PT_REGS_R16_OFFSET 176 /* 0xb0 */ -#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */ +#define IA64_PT_REGS_R17_OFFSET 184 /* 0xb8 */ +#define IA64_PT_REGS_R18_OFFSET 192 /* 0xc0 */ +#define IA64_PT_REGS_R19_OFFSET 200 /* 0xc8 */ +#define IA64_PT_REGS_R20_OFFSET 208 /* 0xd0 */ +#define IA64_PT_REGS_R21_OFFSET 216 /* 0xd8 */ +#define IA64_PT_REGS_R22_OFFSET 224 /* 0xe0 */ +#define IA64_PT_REGS_R23_OFFSET 232 /* 0xe8 */ +#define IA64_PT_REGS_R24_OFFSET 240 /* 0xf0 */ +#define IA64_PT_REGS_R25_OFFSET 248 /* 0xf8 */ +#define IA64_PT_REGS_R26_OFFSET 256 /* 0x100 */ +#define IA64_PT_REGS_R27_OFFSET 264 /* 0x108 */ +#define IA64_PT_REGS_R28_OFFSET 272 /* 0x110 */ +#define IA64_PT_REGS_R29_OFFSET 280 /* 0x118 */ +#define IA64_PT_REGS_R30_OFFSET 288 /* 0x120 */ +#define IA64_PT_REGS_R31_OFFSET 296 /* 0x128 */ +#define IA64_PT_REGS_AR_CCV_OFFSET 304 /* 0x130 */ +#define IA64_PT_REGS_AR_FPSR_OFFSET 312 /* 0x138 */ +#define IA64_PT_REGS_B0_OFFSET 320 /* 0x140 */ +#define IA64_PT_REGS_B7_OFFSET 328 /* 0x148 */ +#define IA64_PT_REGS_F6_OFFSET 336 /* 0x150 */ 
+#define IA64_PT_REGS_F7_OFFSET 352 /* 0x160 */ +#define IA64_PT_REGS_F8_OFFSET 368 /* 0x170 */ +#define IA64_PT_REGS_F9_OFFSET 384 /* 0x180 */ #define IA64_SWITCH_STACK_CALLER_UNAT_OFFSET 0 /* 0x0 */ +#define IA64_SWITCH_STACK_AR_FPSR_OFFSET 8 /* 0x8 */ +#define IA64_SWITCH_STACK_F2_OFFSET 16 /* 0x10 */ +#define IA64_SWITCH_STACK_F3_OFFSET 32 /* 0x20 */ +#define IA64_SWITCH_STACK_F4_OFFSET 48 /* 0x30 */ +#define IA64_SWITCH_STACK_F5_OFFSET 64 /* 0x40 */ +#define IA64_SWITCH_STACK_F10_OFFSET 80 /* 0x50 */ +#define IA64_SWITCH_STACK_F11_OFFSET 96 /* 0x60 */ +#define IA64_SWITCH_STACK_F12_OFFSET 112 /* 0x70 */ +#define IA64_SWITCH_STACK_F13_OFFSET 128 /* 0x80 */ +#define IA64_SWITCH_STACK_F14_OFFSET 144 /* 0x90 */ +#define IA64_SWITCH_STACK_F15_OFFSET 160 /* 0xa0 */ +#define IA64_SWITCH_STACK_F16_OFFSET 176 /* 0xb0 */ +#define IA64_SWITCH_STACK_F17_OFFSET 192 /* 0xc0 */ +#define IA64_SWITCH_STACK_F18_OFFSET 208 /* 0xd0 */ +#define IA64_SWITCH_STACK_F19_OFFSET 224 /* 0xe0 */ +#define IA64_SWITCH_STACK_F20_OFFSET 240 /* 0xf0 */ +#define IA64_SWITCH_STACK_F21_OFFSET 256 /* 0x100 */ +#define IA64_SWITCH_STACK_F22_OFFSET 272 /* 0x110 */ +#define IA64_SWITCH_STACK_F23_OFFSET 288 /* 0x120 */ +#define IA64_SWITCH_STACK_F24_OFFSET 304 /* 0x130 */ +#define IA64_SWITCH_STACK_F25_OFFSET 320 /* 0x140 */ +#define IA64_SWITCH_STACK_F26_OFFSET 336 /* 0x150 */ +#define IA64_SWITCH_STACK_F27_OFFSET 352 /* 0x160 */ +#define IA64_SWITCH_STACK_F28_OFFSET 368 /* 0x170 */ +#define IA64_SWITCH_STACK_F29_OFFSET 384 /* 0x180 */ +#define IA64_SWITCH_STACK_F30_OFFSET 400 /* 0x190 */ +#define IA64_SWITCH_STACK_F31_OFFSET 416 /* 0x1a0 */ +#define IA64_SWITCH_STACK_R4_OFFSET 432 /* 0x1b0 */ +#define IA64_SWITCH_STACK_R5_OFFSET 440 /* 0x1b8 */ +#define IA64_SWITCH_STACK_R6_OFFSET 448 /* 0x1c0 */ +#define IA64_SWITCH_STACK_R7_OFFSET 456 /* 0x1c8 */ +#define IA64_SWITCH_STACK_B0_OFFSET 464 /* 0x1d0 */ +#define IA64_SWITCH_STACK_B1_OFFSET 472 /* 0x1d8 */ +#define IA64_SWITCH_STACK_B2_OFFSET 480 /* 
0x1e0 */ +#define IA64_SWITCH_STACK_B3_OFFSET 488 /* 0x1e8 */ +#define IA64_SWITCH_STACK_B4_OFFSET 496 /* 0x1f0 */ +#define IA64_SWITCH_STACK_B5_OFFSET 504 /* 0x1f8 */ +#define IA64_SWITCH_STACK_AR_PFS_OFFSET 512 /* 0x200 */ +#define IA64_SWITCH_STACK_AR_LC_OFFSET 520 /* 0x208 */ +#define IA64_SWITCH_STACK_AR_UNAT_OFFSET 528 /* 0x210 */ +#define IA64_SWITCH_STACK_AR_RNAT_OFFSET 536 /* 0x218 */ +#define IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET 544 /* 0x220 */ +#define IA64_SWITCH_STACK_PR_OFFSET 464 /* 0x1d0 */ #define IA64_SIGCONTEXT_AR_BSP_OFFSET 72 /* 0x48 */ #define IA64_SIGCONTEXT_AR_RNAT_OFFSET 80 /* 0x50 */ #define IA64_SIGCONTEXT_FLAGS_OFFSET 0 /* 0x0 */ diff -urN linux-2.4.0-test1/include/asm-ia64/pal.h linux-2.4.0-test1-lia/include/asm-ia64/pal.h --- linux-2.4.0-test1/include/asm-ia64/pal.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/pal.h Thu Jun 1 01:13:03 2000 @@ -4,11 +4,12 @@ /* * Processor Abstraction Layer definitions. * - * This is based on version 2.4 of the manual "Enhanced Mode Processor - * Abstraction Layer". + * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0 + * chapter 11 IA-64 Processor Abstraction Layer * * Copyright (C) 1998-2000 Hewlett-Packard Co * Copyright (C) 1998-2000 David Mosberger-Tang + * Copyright (C) 2000 Stephane Eranian * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond * Copyright (C) 1999 Srinivasa Prasad Thirumalachar @@ -16,6 +17,8 @@ * 99/10/01 davidm Make sure we pass zero for reserved parameters. * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info + * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added + * 00/05/25 eranian Support for stack calls, and static physical calls */ /* @@ -127,8 +130,8 @@ typedef union pal_cache_config_info_1_s { struct { u64 u : 1, /* 0 Unified cache ? 
*/ - reserved : 5, /* 7-3 Reserved */ at : 2, /* 2-1 Cache mem attr*/ + reserved : 5, /* 7-3 Reserved */ associativity : 8, /* 16-8 Associativity*/ line_size : 8, /* 23-17 Line size */ stride : 8, /* 31-24 Stride */ @@ -164,8 +167,8 @@ u64 pcci_reserved; } pal_cache_config_info_t; -#define pcci_ld_hint pcci_info_1.pcci1.load_hints -#define pcci_st_hint pcci_info_1.pcci1_bits.store_hints +#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints +#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints #define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency #define pcci_st_latency pcci_info_1.pcci1_bits.store_latency #define pcci_stride pcci_info_1.pcci1_bits.stride @@ -640,23 +643,13 @@ * (generally 0) MUST be passed. Reserved parameters are not optional * parameters. */ -#ifdef __GCC_MULTIREG_RETVALS__ - extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64); - /* - * If multi-register return values are returned according to the - * ia-64 calling convention, we can call ia64_pal_call_static - * directly. - */ -# define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0,a1, a2, a3) -#else - extern void ia64_pal_call_static (struct ia64_pal_retval *, u64, u64, u64, u64); - /* - * If multi-register return values are returned through an aggregate - * allocated in the caller, we need to use the stub implemented in - * sal-stub.S. 
- */ -# define PAL_CALL(iprv,a0,a1,a2,a3) ia64_pal_call_static(&iprv, a0, a1, a2, a3) -#endif +extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64); + +#define PAL_CALL(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_static(a0, a1, a2, a3) +#define PAL_CALL_STK(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_stacked(a0, a1, a2, a3) +#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) iprv = ia64_pal_call_phys_static(a0, a1, a2, a3) typedef int (*ia64_pal_handler) (u64, ...); extern ia64_pal_handler ia64_pal; @@ -716,7 +709,7 @@ pal_bus_features_u_t *features_control) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); + PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); if (features_avail) features_avail->pal_bus_features_val = iprv.v0; if (features_status) @@ -725,15 +718,54 @@ features_control->pal_bus_features_val = iprv.v2; return iprv.status; } + /* Enables/disables specific processor bus features */ extern inline s64 ia64_pal_bus_set_features (pal_bus_features_u_t feature_select) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); + PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); return iprv.status; } +/* Get detailed cache information */ +extern inline s64 +ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + conf->pcci_status = iprv.status; + conf->pcci_info_1.pcci1_data = iprv.v0; + conf->pcci_info_2.pcci2_data = iprv.v1; + conf->pcci_reserved = iprv.v2; + } + return iprv.status; + +} + +/* Get detailed cache protection information */ +extern inline s64 +ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, 
pal_cache_protection_info_t *prot) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + prot->pcpi_status = iprv.status; + prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff; + prot->pcp_info[1].pcpi_data = iprv.v0 >> 32; + prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff; + prot->pcp_info[3].pcpi_data = iprv.v1 >> 32; + prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff; + prot->pcp_info[5].pcpi_data = iprv.v2 >> 32; + } + return iprv.status; +} + /* * Flush the processor instruction or data caches. *PROGRESS must be * initialized to zero before calling this for the first time.. @@ -909,7 +941,10 @@ struct { u64 exit_latency : 16, entry_latency : 16, - power_consumption : 32; + power_consumption : 28, + im : 1, + co : 1, + reserved : 2; } pal_power_mgmt_info_s; } pal_power_mgmt_info_u_t; @@ -918,7 +953,7 @@ ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); + PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); return iprv.status; } @@ -1027,7 +1062,7 @@ struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0); if (mem_attrib) - *mem_attrib = iprv.v0; + *mem_attrib = iprv.v0 & 0xff; return iprv.status; } @@ -1090,28 +1125,32 @@ return iprv.status; } -#ifdef TBD struct pal_features_s; /* Provide information about configurable processor features */ extern inline s64 -ia64_pal_proc_get_features (struct pal_features_s *features_avail, - struct pal_features_s *features_status, - struct pal_features_s *features_control) +ia64_pal_proc_get_features (u64 *features_avail, + u64 *features_status, + u64 *features_control) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0); + PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0); + if (iprv.status == 0) { + *features_avail = iprv.v0; + *features_status = iprv.v1; + *features_control = 
iprv.v2; + } return iprv.status; } + /* Enable/disable processor dependent features */ extern inline s64 -ia64_pal_proc_set_features (feature_select) +ia64_pal_proc_set_features (u64 feature_select) { struct ia64_pal_retval iprv; - PAL_CALL(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0); + PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0); return iprv.status; } -#endif /* * Put everything in a struct so we avoid the global offset table whenever * possible. @@ -1220,12 +1259,16 @@ /* Return PAL version information */ extern inline s64 -ia64_pal_version (pal_version_u_t *pal_version) +ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_VERSION, 0, 0, 0); - if (pal_version) - pal_version->pal_version_val = iprv.v0; + if (pal_min_version) + pal_min_version->pal_version_val = iprv.v0; + + if (pal_cur_version) + pal_cur_version->pal_version_val = iprv.v1; + return iprv.status; } @@ -1242,7 +1285,14 @@ } pal_tc_info_s; } pal_tc_info_u_t; - +#define tc_reduce_tr pal_tc_info_s.reduce_tr +#define tc_unified pal_tc_info_s.unified +#define tc_pf pal_tc_info_s.pf +#define tc_num_entries pal_tc_info_s.num_entries +#define tc_associativity pal_tc_info_s.associativity +#define tc_num_sets pal_tc_info_s.num_sets + + /* Return information about the virtual memory characteristics of the processor * implementation. 
*/ @@ -1278,7 +1328,7 @@ struct { u64 vw : 1, phys_add_size : 7, - key_size : 16, + key_size : 8, max_pkr : 8, hash_tag_id : 8, max_dtr_entry : 8, diff -urN linux-2.4.0-test1/include/asm-ia64/pgtable.h linux-2.4.0-test1-lia/include/asm-ia64/pgtable.h --- linux-2.4.0-test1/include/asm-ia64/pgtable.h Fri Apr 21 16:38:55 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/pgtable.h Thu Jun 1 01:14:01 2000 @@ -16,23 +16,16 @@ #include #include +#include #include -/* Size of virtuaql and physical address spaces: */ -#ifdef CONFIG_ITANIUM -# define IA64_IMPL_VA_MSB 50 -# define IA64_PHYS_BITS 44 /* Itanium PRM defines 44 bits of ppn */ -#else -# define IA64_IMPL_VA_MSB 60 /* maximum value (bits 61-63 are region bits) */ -# define IA64_PHYS_BITS 50 /* EAS2.6 allows up to 50 bits of ppn */ -#endif -#define IA64_PHYS_SIZE (__IA64_UL(1) << IA64_PHYS_BITS) +#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */ /* Is ADDR a valid kernel address? */ #define kern_addr_valid(addr) ((addr) >= TASK_SIZE) /* Is ADDR a valid physical address? */ -#define phys_addr_valid(addr) ((addr) < IA64_PHYS_SIZE) +#define phys_addr_valid(addr) (((addr) & my_cpu_data.unimpl_pa_mask) == 0) /* * First, define the various bits in a PTE. 
Note that the PTE format @@ -63,7 +56,7 @@ #define _PAGE_AR_SHIFT 9 #define _PAGE_A (1 << 5) /* page accessed bit */ #define _PAGE_D (1 << 6) /* page dirty bit */ -#define _PAGE_PPN_MASK ((IA64_PHYS_SIZE - 1) & ~0xfffUL) +#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_PROTNONE (__IA64_UL(1) << 63) @@ -133,7 +126,7 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) -#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RW) +#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) /* * Next come the mappings that determine how mmap() protection bits diff -urN linux-2.4.0-test1/include/asm-ia64/processor.h linux-2.4.0-test1-lia/include/asm-ia64/processor.h --- linux-2.4.0-test1/include/asm-ia64/processor.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/processor.h Fri Jun 9 17:24:33 2000 @@ -237,6 +237,8 @@ __u64 proc_freq; /* frequency of processor */ __u64 cyc_per_usec; /* itc_freq/1000000 */ __u64 usec_per_cyc; /* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */ + __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ + __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ #ifdef CONFIG_SMP __u64 loops_per_sec; __u64 ipi_count; @@ -264,7 +266,8 @@ #define SET_UNALIGN_CTL(task,value) \ ({ \ - (task)->thread.flags |= ((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK; \ + (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ + | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \ 0; \ }) #define GET_UNALIGN_CTL(task,addr) \ @@ -318,6 +321,7 @@ set_fs(USER_DS); \ ia64_psr(regs)->cpl = 3; /* set user mode */ \ ia64_psr(regs)->ri = 0; /* clear return slot number */ \ 
+ ia64_psr(regs)->is = 0; /* IA-64 instruction set */ \ regs->cr_iip = new_ip; \ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ @@ -436,6 +440,14 @@ __asm__ __volatile__ (";; srlz.d" ::: "memory"); } +extern inline __u64 +ia64_get_rr (__u64 reg_bits) +{ + __u64 r; + __asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory"); + return r; +} + extern inline void ia64_set_rr (__u64 reg_bits, __u64 rr_val) { @@ -645,14 +657,17 @@ extern inline unsigned long thread_saved_pc (struct thread_struct *t) { - struct ia64_frame_info info; + struct unw_frame_info info; + unsigned long ip; + /* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */ struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET); - ia64_unwind_init_from_blocked_task(&info, p); - if (ia64_unwind_to_previous_frame(&info) < 0) + unw_init_from_blocked_task(&info, p); + if (unw_unwind(&info) < 0) return 0; - return ia64_unwind_get_ip(&info); + unw_get_ip(&info, &ip); + return ip; } /* diff -urN linux-2.4.0-test1/include/asm-ia64/ptrace.h linux-2.4.0-test1-lia/include/asm-ia64/ptrace.h --- linux-2.4.0-test1/include/asm-ia64/ptrace.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/ptrace.h Fri Jun 9 17:24:41 2000 @@ -220,10 +220,17 @@ extern long ia64_peek (struct pt_regs *, struct task_struct *, unsigned long addr, long *val); extern long ia64_poke (struct pt_regs *, struct task_struct *, unsigned long addr, long val); +#ifdef CONFIG_IA64_NEW_UNWIND + /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */ + extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat); + /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */ + extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat); +#else /* get nat bits for r1-r31 
such that bit N==1 iff rN is a NaT */ extern long ia64_get_nat_bits (struct pt_regs *pt, struct switch_stack *sw); /* put nat bits for r1-r31 such that rN is a NaT iff bit N==1 */ extern void ia64_put_nat_bits (struct pt_regs *pt, struct switch_stack *sw, unsigned long nat); +#endif extern void ia64_increment_ip (struct pt_regs *pt); extern void ia64_decrement_ip (struct pt_regs *pt); diff -urN linux-2.4.0-test1/include/asm-ia64/ptrace_offsets.h linux-2.4.0-test1-lia/include/asm-ia64/ptrace_offsets.h --- linux-2.4.0-test1/include/asm-ia64/ptrace_offsets.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/ptrace_offsets.h Fri Jun 9 17:25:05 2000 @@ -118,8 +118,8 @@ #define PT_F126 0x05e0 #define PT_F127 0x05f0 /* switch stack: */ -#define PT_CALLER_UNAT 0x0600 -#define PT_KERNEL_FPSR 0x0608 +#define PT_NAT_BITS 0x0600 + #define PT_F2 0x0610 #define PT_F3 0x0620 #define PT_F4 0x0630 @@ -150,23 +150,19 @@ #define PT_R5 0x07b8 #define PT_R6 0x07c0 #define PT_R7 0x07c8 -#define PT_K_B0 0x07d0 + #define PT_B1 0x07d8 #define PT_B2 0x07e0 #define PT_B3 0x07e8 #define PT_B4 0x07f0 #define PT_B5 0x07f8 -#define PT_K_AR_PFS 0x0800 + #define PT_AR_LC 0x0808 -#define PT_K_AR_UNAT 0x0810 -#define PT_K_AR_RNAT 0x0818 -#define PT_K_AR_BSPSTORE 0x0820 -#define PT_K_PR 0x0828 + /* pt_regs */ #define PT_CR_IPSR 0x0830 #define PT_CR_IIP 0x0838 #define PT_CFM 0x0840 -#define PT_CR_IFS PT_CFM /* Use of PT_CR_IFS is deprecated */ #define PT_AR_UNAT 0x0848 #define PT_AR_PFS 0x0850 #define PT_AR_RSC 0x0858 diff -urN linux-2.4.0-test1/include/asm-ia64/sal.h linux-2.4.0-test1-lia/include/asm-ia64/sal.h --- linux-2.4.0-test1/include/asm-ia64/sal.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/sal.h Thu May 25 23:51:13 2000 @@ -23,17 +23,7 @@ extern spinlock_t sal_lock; -#ifdef __GCC_MULTIREG_RETVALS__ - /* If multi-register return values are returned according to the - ia-64 calling convention, we can call ia64_sal directly. 
*/ -# define __SAL_CALL(result,args...) result = (*ia64_sal)(args) -#else - /* If multi-register return values are returned through an aggregate - allocated in the caller, we need to use the stub implemented in - sal-stub.S. */ - extern struct ia64_sal_retval ia64_sal_stub (u64 index, ...); -# define __SAL_CALL(result,args...) result = ia64_sal_stub(args) -#endif +#define __SAL_CALL(result,args...) result = (*ia64_sal)(args) #ifdef CONFIG_SMP # define SAL_CALL(result,args...) do { \ @@ -494,7 +484,19 @@ ia64_sal_pci_config_read (u64 pci_config_addr, u64 size, u64 *value) { struct ia64_sal_retval isrv; +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + extern spinlock_t ivr_read_lock; + unsigned long flags; + + /* + * Avoid PCI configuration read/write overwrite -- A0 Interrupt loss workaround + */ + spin_lock_irqsave(&ivr_read_lock, flags); +#endif SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size); +#ifdef CONFIG_ITANIUM_A1_SPECIFIC + spin_unlock_irqrestore(&ivr_read_lock, flags); +#endif if (value) *value = isrv.v0; return isrv.status; @@ -505,7 +507,7 @@ ia64_sal_pci_config_write (u64 pci_config_addr, u64 size, u64 value) { struct ia64_sal_retval isrv; -#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED) +#ifdef CONFIG_ITANIUM_A1_SPECIFIC extern spinlock_t ivr_read_lock; unsigned long flags; @@ -515,7 +517,7 @@ spin_lock_irqsave(&ivr_read_lock, flags); #endif SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value); -#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) && !defined(SAPIC_FIXED) +#ifdef CONFIG_ITANIUM_A1_SPECIFIC spin_unlock_irqrestore(&ivr_read_lock, flags); #endif return isrv.status; diff -urN linux-2.4.0-test1/include/asm-ia64/siginfo.h linux-2.4.0-test1-lia/include/asm-ia64/siginfo.h --- linux-2.4.0-test1/include/asm-ia64/siginfo.h Wed May 24 18:38:26 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/siginfo.h Fri May 26 20:39:21 2000 @@ -56,6 +56,8 @@ struct { void *_addr; /* faulting insn/memory ref. 
*/ int _imm; /* immediate value for "break" */ + int _pad0; + unsigned long _isr; /* isr */ } _sigfault; /* SIGPOLL */ @@ -79,6 +81,7 @@ #define si_ptr _sifields._rt._sigval.sival_ptr #define si_addr _sifields._sigfault._addr #define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */ +#define si_isr _sifields._sigfault._isr /* valid if si_code==FPE_FLTxxx */ #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd diff -urN linux-2.4.0-test1/include/asm-ia64/spinlock.h linux-2.4.0-test1-lia/include/asm-ia64/spinlock.h --- linux-2.4.0-test1/include/asm-ia64/spinlock.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/spinlock.h Thu May 25 23:51:01 2000 @@ -9,6 +9,8 @@ * This file is used for SMP configurations only. */ +#include + #include #include #include @@ -40,7 +42,7 @@ "cmp4.eq p0,p7 = r0, r2\n" \ "(p7) br.cond.spnt.few 1b\n" \ ";;\n" \ - :: "m" __atomic_fool_gcc((x)) : "r2", "r29") + :: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory") #else #define spin_lock(x) \ @@ -55,22 +57,12 @@ #define spin_is_locked(x) ((x)->lock != 0) -#define spin_unlock(x) (((spinlock_t *) x)->lock = 0) +#define spin_unlock(x) ({((spinlock_t *) x)->lock = 0; barrier();}) /* Streamlined !test_and_set_bit(0, (x)) */ -#define spin_trylock(x) \ -({ \ - spinlock_t *__x = (x); \ - __u32 old; \ - \ - do { \ - old = __x->lock; \ - } while (cmpxchg_acq(&__x->lock, old, 1) != old); \ - old == 0; \ -}) +#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) -#define spin_unlock_wait(x) \ - ({ do { barrier(); } while(((volatile spinlock_t *)x)->lock); }) +#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); }) typedef struct { volatile int read_counter:31; @@ -78,45 +70,49 @@ } rwlock_t; #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } -#define read_lock(rw) \ -do { \ - int tmp = 0; \ - __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \ - ";;\n" \ - "tbit.nz p6,p0 = %0, 31\n" \ - "(p6) br.cond.sptk.few 2f\n" \ - 
".section .text.lock,\"ax\"\n" \ - "2:\tfetchadd4.rel %0 = %1, -1\n" \ - ";;\n" \ - "3:\tld4.acq %0 = %1\n" \ - ";;\n" \ - "tbit.nz p6,p0 = %0, 31\n" \ - "(p6) br.cond.sptk.few 3b\n" \ - "br.cond.sptk.few 1b\n" \ - ";;\n" \ - ".previous\n": "=r" (tmp), "=m" (__atomic_fool_gcc(rw))); \ +#define read_lock(rw) \ +do { \ + int tmp = 0; \ + __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 2f\n" \ + ".section .text.lock,\"ax\"\n" \ + "2:\tfetchadd4.rel %0 = %1, -1\n" \ + ";;\n" \ + "3:\tld4.acq %0 = %1\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 3b\n" \ + "br.cond.sptk.few 1b\n" \ + ";;\n" \ + ".previous\n" \ + : "=r" (tmp), "=m" (__atomic_fool_gcc(rw)) \ + :: "memory"); \ } while(0) -#define read_unlock(rw) \ -do { \ - int tmp = 0; \ - __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \ - : "=r" (tmp) : "m" (__atomic_fool_gcc(rw))); \ +#define read_unlock(rw) \ +do { \ + int tmp = 0; \ + __asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n" \ + : "=r" (tmp) \ + : "m" (__atomic_fool_gcc(rw)) \ + : "memory"); \ } while(0) #define write_lock(rw) \ -while(1) { \ +do { \ do { \ - } while (!test_and_set_bit(31, (rw))); \ - if ((rw)->read_counter) { \ - clear_bit(31, (rw)); \ - while ((rw)->read_counter) \ - ; \ - } else { \ - break; \ - } \ -} + while ((rw)->write_lock); \ + } while (test_and_set_bit(31, (rw))); \ + while ((rw)->read_counter); \ + barrier(); \ +} while (0) -#define write_unlock(x) (clear_bit(31, (x))) +/* + * clear_bit() has "acq" semantics; we're really need "rel" semantics, + * but for simplicity, we simply do a fence for now... 
+ */ +#define write_unlock(x) ({clear_bit(31, (x)); mb();}) #endif /* _ASM_IA64_SPINLOCK_H */ diff -urN linux-2.4.0-test1/include/asm-ia64/string.h linux-2.4.0-test1-lia/include/asm-ia64/string.h --- linux-2.4.0-test1/include/asm-ia64/string.h Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/string.h Thu Jun 1 01:14:24 2000 @@ -12,4 +12,7 @@ #define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */ #define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */ +extern __kernel_size_t strlen (const char *); +extern void *memset (void *,int,__kernel_size_t); + #endif /* _ASM_IA64_STRING_H */ diff -urN linux-2.4.0-test1/include/asm-ia64/system.h linux-2.4.0-test1-lia/include/asm-ia64/system.h --- linux-2.4.0-test1/include/asm-ia64/system.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/system.h Fri Jun 9 17:25:19 2000 @@ -33,9 +33,9 @@ struct pci_vector_struct { __u16 bus; /* PCI Bus number */ - __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ - __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ - __u8 irq; /* IRQ assigned */ + __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ + __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ + __u8 irq; /* IRQ assigned */ }; extern struct ia64_boot_param { @@ -54,6 +54,8 @@ __u16 num_pci_vectors; /* number of ACPI derived PCI IRQ's*/ __u64 pci_vectors; /* physical address of PCI data (pci_vector_struct)*/ __u64 fpswa; /* physical address of the the fpswa interface */ + __u64 initrd_start; + __u64 initrd_size; } ia64_boot_param; extern inline void @@ -135,7 +137,7 @@ do { \ unsigned long ip, old_psr, psr = (x); \ \ - __asm__ __volatile__ ("mov %0=psr; mov psr.l=%1;; srlz.d" \ + __asm__ __volatile__ (";;mov %0=psr; mov psr.l=%1;; srlz.d" \ : "=&r" (old_psr) : "r" (psr) : "memory"); \ if ((old_psr & (1UL << 14)) && !(psr & (1UL << 14))) { \ __asm__ ("mov %0=ip" : "=r"(ip)); \ @@ -149,7 +151,7 @@ : "=r" (x) :: "memory") # 
define local_irq_disable() __asm__ __volatile__ (";; rsm psr.i;;" ::: "memory") /* (potentially) setting psr.i requires data serialization: */ -# define local_irq_restore(x) __asm__ __volatile__ ("mov psr.l=%0;; srlz.d" \ +# define local_irq_restore(x) __asm__ __volatile__ (";; mov psr.l=%0;; srlz.d" \ :: "r" (x) : "memory") #endif /* !CONFIG_IA64_DEBUG_IRQ */ @@ -394,32 +396,13 @@ #ifdef __KERNEL__ -extern void ia64_save_debug_regs (unsigned long *save_area); -extern void ia64_load_debug_regs (unsigned long *save_area); - #define prepare_to_switch() do { } while(0) #ifdef CONFIG_IA32_SUPPORT # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) -# define IA32_STATE(prev,next) \ - if (IS_IA32_PROCESS(ia64_task_regs(prev))) { \ - __asm__ __volatile__("mov %0=ar.eflag":"=r"((prev)->thread.eflag)); \ - __asm__ __volatile__("mov %0=ar.fsr":"=r"((prev)->thread.fsr)); \ - __asm__ __volatile__("mov %0=ar.fcr":"=r"((prev)->thread.fcr)); \ - __asm__ __volatile__("mov %0=ar.fir":"=r"((prev)->thread.fir)); \ - __asm__ __volatile__("mov %0=ar.fdr":"=r"((prev)->thread.fdr)); \ - } \ - if (IS_IA32_PROCESS(ia64_task_regs(next))) { \ - __asm__ __volatile__("mov ar.eflag=%0"::"r"((next)->thread.eflag)); \ - __asm__ __volatile__("mov ar.fsr=%0"::"r"((next)->thread.fsr)); \ - __asm__ __volatile__("mov ar.fcr=%0"::"r"((next)->thread.fcr)); \ - __asm__ __volatile__("mov ar.fir=%0"::"r"((next)->thread.fir)); \ - __asm__ __volatile__("mov ar.fdr=%0"::"r"((next)->thread.fdr)); \ - } -#else /* !CONFIG_IA32_SUPPORT */ -# define IA32_STATE(prev,next) +#else # define IS_IA32_PROCESS(regs) 0 -#endif /* CONFIG_IA32_SUPPORT */ +#endif /* * Context switch from one thread to another. If the two threads have @@ -432,15 +415,18 @@ * ia64_ret_from_syscall_clear_r8. 
*/ extern struct task_struct *ia64_switch_to (void *next_task); + +extern void ia64_save_extra (struct task_struct *task); +extern void ia64_load_extra (struct task_struct *task); + #define __switch_to(prev,next,last) do { \ + if (((prev)->thread.flags & IA64_THREAD_DBG_VALID) \ + || IS_IA32_PROCESS(ia64_task_regs(prev))) \ + ia64_save_extra(prev); \ + if (((next)->thread.flags & IA64_THREAD_DBG_VALID) \ + || IS_IA32_PROCESS(ia64_task_regs(next))) \ + ia64_load_extra(next); \ ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \ - if ((prev)->thread.flags & IA64_THREAD_DBG_VALID) { \ - ia64_save_debug_regs(&(prev)->thread.dbr[0]); \ - } \ - if ((next)->thread.flags & IA64_THREAD_DBG_VALID) { \ - ia64_load_debug_regs(&(next)->thread.dbr[0]); \ - } \ - IA32_STATE(prev,next); \ (last) = ia64_switch_to((next)); \ } while (0) diff -urN linux-2.4.0-test1/include/asm-ia64/unistd.h linux-2.4.0-test1-lia/include/asm-ia64/unistd.h --- linux-2.4.0-test1/include/asm-ia64/unistd.h Fri Apr 21 15:21:24 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/unistd.h Mon May 15 12:13:41 2000 @@ -269,7 +269,7 @@ name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ { \ return __ia64_syscall((long) arg1, (long) arg2, (long) arg3, \ - (long) arg4, (long), __NR_##name); \ + (long) arg4, (long) arg5, __NR_##name); \ } #ifdef __KERNEL_SYSCALLS__ diff -urN linux-2.4.0-test1/include/asm-ia64/unwind.h linux-2.4.0-test1-lia/include/asm-ia64/unwind.h --- linux-2.4.0-test1/include/asm-ia64/unwind.h Sun Feb 6 18:42:40 2000 +++ linux-2.4.0-test1-lia/include/asm-ia64/unwind.h Fri Jun 9 17:25:28 2000 @@ -2,8 +2,8 @@ #define _ASM_IA64_UNWIND_H /* - * Copyright (C) 1999 Hewlett-Packard Co - * Copyright (C) 1999 David Mosberger-Tang + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang * * A simple API for unwinding kernel stacks. This is used for * debugging and error reporting purposes. 
The kernel doesn't need @@ -16,27 +16,70 @@ struct task_struct; /* forward declaration */ struct switch_stack; /* forward declaration */ +enum unw_application_register { + UNW_AR_BSP, + UNW_AR_BSPSTORE, + UNW_AR_PFS, + UNW_AR_RNAT, + UNW_AR_UNAT, + UNW_AR_LC, + UNW_AR_FPSR, + UNW_AR_RSC, + UNW_AR_CCV +}; + /* * The following declarations are private to the unwind * implementation: */ -struct ia64_stack { - unsigned long *limit; - unsigned long *top; +struct unw_stack { + unsigned long limit; + unsigned long top; }; +#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0) + /* * No user of this module should every access this structure directly * as it is subject to change. It is declared here solely so we can * use automatic variables. */ -struct ia64_frame_info { - struct ia64_stack regstk; - unsigned long *bsp; - unsigned long top_rnat; /* RSE NaT collection at top of backing store */ - unsigned long cfm; +struct unw_frame_info { + struct unw_stack regstk; + struct unw_stack memstk; + unsigned int flags; + short hint; + short prev_script; + unsigned long bsp; + unsigned long sp; /* stack pointer */ + unsigned long psp; /* previous sp */ unsigned long ip; /* instruction pointer */ + unsigned long pr_val; /* current predicates */ + unsigned long *cfm; + + struct switch_stack *sw; + + /* preserved state: */ + unsigned long *pbsp; /* previous bsp */ + unsigned long *bspstore; + unsigned long *pfs; + unsigned long *rnat; + unsigned long *rp; + unsigned long *pri_unat; + unsigned long *unat; + unsigned long *pr; + unsigned long *lc; + unsigned long *fpsr; + struct unw_ireg { + unsigned long *loc; + struct unw_ireg_nat { + int type : 3; /* enum unw_nat_type */ + signed int off; /* NaT word is at loc+nat.off */ + } nat; + } r4, r5, r6, r7; + unsigned long *b1, *b2, *b3, *b4, *b5; + struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16]; }; /* @@ -44,10 +87,19 @@ */ /* + * Initialize unwind support. 
+ */ +extern void unw_init (void); + +extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, + void *table_start, void *table_end); + +extern void unw_remove_unwind_table (void *handle); + +/* * Prepare to unwind blocked task t. */ -extern void ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, - struct task_struct *t); +extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); /* * Prepare to unwind the current task. For this to work, the kernel @@ -63,15 +115,70 @@ * | struct switch_stack | * +---------------------+ */ -extern void ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *regs); +extern void unw_init_from_current (struct unw_frame_info *info, struct pt_regs *regs); /* * Unwind to previous to frame. Returns 0 if successful, negative * number in case of an error. */ -extern int ia64_unwind_to_previous_frame (struct ia64_frame_info *info); +extern int unw_unwind (struct unw_frame_info *info); -#define ia64_unwind_get_ip(info) ((info)->ip) -#define ia64_unwind_get_bsp(info) ((unsigned long) (info)->bsp) +#define unw_get_ip(info,vp) ({*(vp) = (info)->ip; 0;}) +#define unw_get_sp(info,vp) ({*(vp) = (unsigned long) (info)->sp; 0;}) +#define unw_get_psp(info,vp) ({*(vp) = (unsigned long) (info)->psp; 0;}) +#define unw_get_bsp(info,vp) ({*(vp) = (unsigned long) (info)->bsp; 0;}) +#define unw_get_cfm(info,vp) ({*(vp) = *(info)->cfm; 0;}) +#define unw_set_cfm(info,val) ({*(info)->cfm = (val); 0;}) + +static inline int +unw_get_rp (struct unw_frame_info *info, unsigned long *val) +{ + if (!info->rp) + return -1; + *val = *info->rp; + return 0; +} + +extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int); +extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int); +extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int); +extern int unw_access_ar (struct unw_frame_info *, 
int, unsigned long *, int); +extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int); + +static inline int +unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat) +{ + return unw_access_gr(i, n, &v, &nat, 1); +} + +static inline int +unw_set_br (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_br(i, n, &v, 1); +} + +static inline int +unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v) +{ + return unw_access_fr(i, n, &v, 1); +} + +static inline int +unw_set_ar (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_ar(i, n, &v, 1); +} + +static inline int +unw_set_pr (struct unw_frame_info *i, unsigned long v) +{ + return unw_access_pr(i, &v, 1); +} + +#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0) +#define unw_get_br(i,n,v) unw_access_br(i,n,v,0) +#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0) +#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0) +#define unw_get_pr(i,v) unw_access_pr(i,v,0) -#endif /* _ASM_IA64_UNWIND_H */ +#endif /* _ASM_UNWIND_H */ diff -urN linux-2.4.0-test1/include/linux/irq.h linux-2.4.0-test1-lia/include/linux/irq.h --- linux-2.4.0-test1/include/linux/irq.h Wed May 24 19:52:41 2000 +++ linux-2.4.0-test1-lia/include/linux/irq.h Thu May 25 23:51:02 2000 @@ -18,6 +18,7 @@ #define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL 64 /* IRQ level triggered */ #define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ +#define IRQ_PER_CPU 256 /* IRQ is per CPU */ /* * Interrupt controller descriptor. 
This is all we need @@ -55,6 +56,7 @@ #include /* the arch dependent stuff */ +extern unsigned int do_IRQ (unsigned long irq, struct pt_regs *regs); extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); extern int setup_irq(unsigned int , struct irqaction * ); diff -urN linux-2.4.0-test1/include/linux/nfsd/syscall.h linux-2.4.0-test1-lia/include/linux/nfsd/syscall.h --- linux-2.4.0-test1/include/linux/nfsd/syscall.h Wed May 24 19:53:29 2000 +++ linux-2.4.0-test1-lia/include/linux/nfsd/syscall.h Thu May 25 23:51:56 2000 @@ -133,7 +133,7 @@ * Kernel syscall implementation. */ #if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE) -extern asmlinkage int sys_nfsservctl(int, void *, void *); +extern asmlinkage long sys_nfsservctl(int, void *, void *); #else #define sys_nfsservctl sys_ni_syscall #endif diff -urN linux-2.4.0-test1/init/main.c linux-2.4.0-test1-lia/init/main.c --- linux-2.4.0-test1/init/main.c Wed May 24 17:52:52 2000 +++ linux-2.4.0-test1-lia/init/main.c Thu Jun 1 01:15:49 2000 @@ -109,6 +109,9 @@ #if defined(CONFIG_QUOTA) extern void dquot_init_hash(void); #endif +#ifdef CONFIG_PERFMON +extern void perfmon_init(void); +#endif /* * Boot command-line arguments @@ -569,6 +572,9 @@ #endif mem_init(); kmem_cache_sizes_init(); +#ifdef CONFIG_PERFMON + perfmon_init(); +#endif #ifdef CONFIG_3215_CONSOLE con3215_activate(); #endif diff -urN linux-2.4.0-test1/ipc/util.c linux-2.4.0-test1-lia/ipc/util.c --- linux-2.4.0-test1/ipc/util.c Mon May 15 11:19:26 2000 +++ linux-2.4.0-test1-lia/ipc/util.c Fri May 26 20:39:06 2000 @@ -223,6 +223,8 @@ out->seq = in->seq; } +#ifndef __ia64__ + int ipc_parse_version (int *cmd) { if (*cmd & IPC_64) { @@ -232,6 +234,8 @@ return IPC_OLD; } } + +#endif /* __ia64__ */ #else /* diff -urN linux-2.4.0-test1/ipc/util.h linux-2.4.0-test1-lia/ipc/util.h --- linux-2.4.0-test1/ipc/util.h Tue Feb 8 18:23:13 2000 +++ linux-2.4.0-test1-lia/ipc/util.h Tue May 2 12:46:36 2000 @@ -99,4 +99,9 @@ void 
kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); +#ifdef __ia64__ + /* On IA-64, we always use the "64-bit version" of the IPC structures. */ +# define ipc_parse_version(cmd) IPC_64 +#else int ipc_parse_version (int *cmd); +#endif diff -urN linux-2.4.0-test1/kernel/printk.c linux-2.4.0-test1-lia/kernel/printk.c --- linux-2.4.0-test1/kernel/printk.c Fri May 12 11:21:20 2000 +++ linux-2.4.0-test1-lia/kernel/printk.c Fri May 26 20:38:15 2000 @@ -14,6 +14,8 @@ * manfreds@colorfullife.com */ +#include + #include #include #include @@ -296,6 +298,12 @@ break; } } +#ifdef CONFIG_IA64_EARLY_PRINTK + if (!console_drivers) { + static void early_printk (const char *str); + early_printk(msg); + } else +#endif if (msg_level < console_loglevel && console_drivers) { struct console *c = console_drivers; while(c) { @@ -412,6 +420,10 @@ } if ((console->flags & CON_PRINTBUFFER) == 0) goto done; +#ifdef CONFIG_IA64_EARLY_PRINTK + goto done; +#endif + /* * Print out buffered log messages. 
*/ @@ -487,3 +499,47 @@ tty->driver.write(tty, 0, msg, strlen(msg)); return; } + +#ifdef CONFIG_IA64_EARLY_PRINTK + +#include + +#define VGABASE ((char *)0x00000000000b8000) + +static int current_ypos = 50, current_xpos = 0; + +void +early_printk (const char *str) +{ + char c; + int i, k, j; + + while ((c = *str++) != '\0') { + if (current_ypos >= 50) { + /* scroll 1 line up */ + for (k = 1, j = 0; k < 50; k++, j++) { + for (i = 0; i < 80; i++) { + writew(readw(VGABASE + 2*(80*k + i)), + VGABASE + 2*(80*j + i)); + } + } + for (i = 0; i < 80; i++) { + writew(0x720, VGABASE + 2*(80*j + i)); + } + current_ypos = 49; + } + if (c == '\n') { + current_xpos = 0; + current_ypos++; + } else if (c != '\r') { + writew(((0x7 << 8) | (unsigned short) c), + VGABASE + 2*(80*current_ypos + current_xpos++)); + if (current_xpos >= 80) { + current_xpos = 0; + current_ypos++; + } + } + } +} + +#endif /* CONFIG_IA64_EARLY_PRINTK */