diff -urN 2.3.42/arch/alpha/kernel/Makefile 2.3.42-alpha-irq/arch/alpha/kernel/Makefile --- 2.3.42/arch/alpha/kernel/Makefile Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/Makefile Thu Feb 3 20:12:30 2000 @@ -14,7 +14,7 @@ O_TARGET := kernel.o O_OBJS := entry.o traps.o process.o osf_sys.o irq.o signal.o setup.o \ - ptrace.o time.o semaphore.o + ptrace.o time.o semaphore.o i8259.o rtc_irq.o OX_OBJS := alpha_ksyms.o diff -urN 2.3.42/arch/alpha/kernel/i8259.c 2.3.42-alpha-irq/arch/alpha/kernel/i8259.c --- 2.3.42/arch/alpha/kernel/i8259.c Thu Jan 1 01:00:00 1970 +++ 2.3.42-alpha-irq/arch/alpha/kernel/i8259.c Thu Feb 3 20:12:30 2000 @@ -0,0 +1,123 @@ +/* started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c */ + +#include +#include +#include +#include +#include + +#include +#include + +/* + * This is the 'legacy' 8259A Programmable Interrupt Controller, + * present in the majority of PC/AT boxes. + */ + +static void enable_8259A_irq(unsigned int irq); +static void disable_8259A_irq(unsigned int irq); + +/* shutdown is same as "disable" */ +#define end_8259A_irq enable_8259A_irq +#define shutdown_8259A_irq disable_8259A_irq + +static void mask_and_ack_8259A(unsigned int); + +static unsigned int startup_8259A_irq(unsigned int irq) +{ + enable_8259A_irq(irq); + return 0; /* never anything pending */ +} + +static struct hw_interrupt_type i8259A_irq_type = { + "XT-PIC", + startup_8259A_irq, + shutdown_8259A_irq, + enable_8259A_irq, + disable_8259A_irq, + mask_and_ack_8259A, + end_8259A_irq +}; + +/* + * 8259A PIC functions to handle ISA devices: + */ + +/* + * This contains the irq mask for both 8259A irq controllers, + */ +static unsigned int cached_irq_mask = 0xffff; + +#define __byte(x,y) (((unsigned char *)&(y))[x]) +#define cached_21 (__byte(0,cached_irq_mask)) +#define cached_A1 (__byte(1,cached_irq_mask)) + +/* + * These have to be protected by the irq controller spinlock + * before being called. + */ +static void disable_8259A_irq(unsigned int irq) +{ + unsigned int mask = 1 << irq; + cached_irq_mask |= mask; + if (irq & 8) + outb(cached_A1,0xA1); + else + outb(cached_21,0x21); +} + +static void enable_8259A_irq(unsigned int irq) +{ + unsigned int mask = ~(1 << irq); + cached_irq_mask &= mask; + if (irq & 8) + outb(cached_A1,0xA1); + else + outb(cached_21,0x21); +} + +static void mask_and_ack_8259A(unsigned int irq) +{ + disable_8259A_irq(irq); + + /* Ack the interrupt making it the lowest priority */ + /* First the slave .. */ + if (irq > 7) { + outb(0xE0 | (irq - 8), 0xa0); + irq = 2; + } + /* .. 
then the master */ + outb(0xE0 | irq, 0x20); +} + +static void init_8259A(void) +{ + outb(0xff, 0x21); /* mask all of 8259A-1 */ + outb(0xff, 0xA1); /* mask all of 8259A-2 */ +} + +/* + * IRQ2 is cascade interrupt to second interrupt controller + */ +static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL}; + +void __init +init_ISA_irqs (void) +{ + int i; + + for (i = 0; i < NR_IRQS; i++) { + if (i == RTC_IRQ) + continue; + if (i >= 16) + break; + irq_desc[i].status = IRQ_DISABLED; + /* + * 16 old-style INTA-cycle interrupts: + */ + irq_desc[i].handler = &i8259A_irq_type; + } + + init_8259A(); + setup_irq(2, &irq2); +} diff -urN 2.3.42/arch/alpha/kernel/irq.c 2.3.42-alpha-irq/arch/alpha/kernel/irq.c --- 2.3.42/arch/alpha/kernel/irq.c Thu Dec 9 02:27:27 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/irq.c Thu Feb 3 20:12:30 2000 @@ -39,6 +39,7 @@ #ifndef __SMP__ int __local_irq_count; int __local_bh_count; +unsigned long __irq_attempt[NR_IRQS]; #endif #if NR_IRQS > 128 @@ -57,12 +58,6 @@ /* - * Shadow-copy of masked interrupts. - */ - -unsigned long _alpha_irq_masks[2] = { ~0UL, ~0UL }; - -/* * The ack_irq routine used by 80% of the systems. */ @@ -135,7 +130,7 @@ return; } } - handle_irq(j, j, regs); + handle_irq(j, regs); #else unsigned long pic; @@ -169,77 +164,201 @@ void srm_device_interrupt(unsigned long vector, struct pt_regs * regs) { - int irq, ack; + int irq; - ack = irq = (vector - 0x800) >> 4; - handle_irq(irq, ack, regs); + irq = (vector - 0x800) >> 4; + handle_irq(irq, regs); } /* + * Special irq handlers. + */ + +void no_action(int cpl, void *dev_id, struct pt_regs *regs) { } + +/* * Initial irq handlers. */ -static struct irqaction timer_irq = { NULL, 0, 0, NULL, NULL, NULL}; -spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED; -irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = {0,} }; +static void enable_none(unsigned int irq) { } +static unsigned int startup_none(unsigned int irq) { return 0; } +static void disable_none(unsigned int irq) { } +static void ack_none(unsigned int irq) +{ + printk("unexpected IRQ trap at vector %02x\n", irq); +} + +/* startup is the same as "enable", shutdown is same as "disable" */ +#define shutdown_none disable_none +#define end_none enable_none + +struct hw_interrupt_type no_irq_type = { + "none", + startup_none, + shutdown_none, + enable_none, + disable_none, + ack_none, + end_none +}; +spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED; +irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = + { [0 ... 
NR_IRQS-1] = { 0, &no_irq_type, }}; -static inline void -mask_irq(unsigned long irq) +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action) { - set_bit(irq, _alpha_irq_masks); - alpha_mv.update_irq_hw(irq, alpha_irq_mask, 0); -} + int status; + int cpu = smp_processor_id(); -static inline void -unmask_irq(unsigned long irq) -{ - clear_bit(irq, _alpha_irq_masks); - alpha_mv.update_irq_hw(irq, alpha_irq_mask, 1); + kstat.irqs[cpu][irq]++; + irq_enter(cpu, irq); + + status = 1; /* Force the "do bottom halves" bit */ + + do { + if (!(action->flags & SA_INTERRUPT)) + __sti(); + else + __cli(); + + status |= action->flags; + action->handler(irq, action->dev_id, regs); + action = action->next; + } while (action); + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + __cli(); + + irq_exit(cpu, irq); + + return status; } +/* + * Generic enable/disable code: this just calls + * down into the PIC-specific version for the actual + * hardware disable after having gotten the irq + * controller lock. + */ void -disable_irq_nosync(unsigned int irq_nr) +disable_irq_nosync(unsigned int irq) { unsigned long flags; - save_and_cli(flags); - mask_irq(irq_nr); - restore_flags(flags); + spin_lock_irqsave(&irq_controller_lock, flags); + if (!irq_desc[irq].depth++) { + irq_desc[irq].status |= IRQ_DISABLED; + irq_desc[irq].handler->disable(irq); + } + spin_unlock_irqrestore(&irq_controller_lock, flags); } +/* + * Synchronous version of the above, making sure the IRQ is + * no longer running on any other IRQ.. + */ void -disable_irq(unsigned int irq_nr) +disable_irq(unsigned int irq) { - /* This works non-SMP, and SMP until we write code to distribute - interrupts to more that cpu 0. */ - disable_irq_nosync(irq_nr); + disable_irq_nosync(irq); + + if (!local_irq_count(smp_processor_id())) { + do { + barrier(); + } while (irq_desc[irq].status & IRQ_INPROGRESS); + } } void -enable_irq(unsigned int irq_nr) +enable_irq(unsigned int irq) { unsigned long flags; - save_and_cli(flags); - unmask_irq(irq_nr); - restore_flags(flags); + spin_lock_irqsave(&irq_controller_lock, flags); + switch (irq_desc[irq].depth) { + case 1: { + unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED; + irq_desc[irq].status = status; + if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + irq_desc[irq].status = status | IRQ_REPLAY; + hw_resend_irq(irq_desc[irq].handler,irq); /* noop */ + } + irq_desc[irq].handler->enable(irq); + /* fall-through */ + } + default: + irq_desc[irq].depth--; + break; + case 0: + printk("enable_irq() unbalanced from %p\n", + __builtin_return_address(0)); + } + spin_unlock_irqrestore(&irq_controller_lock, flags); } int -check_irq(unsigned int irq) +setup_irq(unsigned int irq, struct irqaction * new) { - return irq_desc[irq].action ? -EBUSY : 0; + int shared = 0; + struct irqaction *old, **p; + unsigned long flags; + + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & SA_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. 
+ */ + rand_initialize_irq(irq); + } + + /* + * The following block of code has to be executed atomically + */ + spin_lock_irqsave(&irq_controller_lock,flags); + p = &irq_desc[irq].action; + if ((old = *p) != NULL) { + /* Can't share interrupts unless both agree to */ + if (!(old->flags & new->flags & SA_SHIRQ)) { + spin_unlock_irqrestore(&irq_controller_lock,flags); + return -EBUSY; + } + + /* add new interrupt at end of irq queue */ + do { + p = &old->next; + old = *p; + } while (old); + shared = 1; + } + + *p = new; + + if (!shared) { + irq_desc[irq].depth = 0; + irq_desc[irq].status &= ~IRQ_DISABLED; + irq_desc[irq].handler->startup(irq); + } + spin_unlock_irqrestore(&irq_controller_lock,flags); + return 0; } int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char * devname, void *dev_id) { - int shared = 0; - struct irqaction * action, **p; - unsigned long flags; + int retval; + struct irqaction * action; if (irq >= ACTUAL_NR_IRQS) return -EINVAL; @@ -248,36 +367,25 @@ if (!handler) return -EINVAL; - p = &irq_desc[irq].action; - action = *p; - if (action) { - /* Can't share interrupts unless both agree to */ - if (!(action->flags & irqflags & SA_SHIRQ)) - return -EBUSY; - - /* Can't share interrupts unless both are same type */ - if ((action->flags ^ irqflags) & SA_INTERRUPT) - return -EBUSY; - - /* Add new interrupt at end of irq queue */ - do { - p = &action->next; - action = *p; - } while (action); - shared = 1; +#if 1 + /* + * Sanity-check: shared interrupts should REALLY pass in + * a real dev-ID, otherwise we'll have trouble later trying + * to figure out which interrupt is which (messes up the + * interrupt freeing logic etc). + */ + if (irqflags & SA_SHIRQ) { + if (!dev_id) + printk("Bad boy: %s (at %p) called us without a dev_id!\n", + devname, __builtin_return_address(0)); } +#endif - action = &timer_irq; - if (irq != TIMER_IRQ) { - action = (struct irqaction *) + action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL); - } if (!action) return -ENOMEM; - if (irqflags & SA_SAMPLE_RANDOM) - rand_initialize_irq(irq); - action->handler = handler; action->flags = irqflags; action->mask = 0; @@ -285,20 +393,16 @@ action->next = NULL; action->dev_id = dev_id; - save_and_cli(flags); - *p = action; - - if (!shared) - unmask_irq(irq); - - restore_flags(flags); - return 0; + retval = setup_irq(irq, action); + if (retval) + kfree(action); + return retval; } - + void free_irq(unsigned int irq, void *dev_id) { - struct irqaction * action, **p; + struct irqaction **p; unsigned long flags; if (irq >= ACTUAL_NR_IRQS) { @@ -309,25 +413,39 @@ printk("Trying to free reserved IRQ %d\n", irq); return; } - for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) { - if (action->dev_id != dev_id) - continue; + spin_lock_irqsave(&irq_controller_lock,flags); + p = &irq_desc[irq].action; + for (;;) { + struct irqaction * action = *p; + if (action) { + struct irqaction **pp = p; + p = &action->next; + if (action->dev_id != dev_id) + continue; - /* Found it - now free it */ - save_and_cli(flags); - *p = action->next; - if (!irq_desc[irq].action) - mask_irq(irq); - restore_flags(flags); - kfree(action); + /* Found it - now remove it from the list of entries */ + *pp = action->next; + if (!irq_desc[irq].action) { + irq_desc[irq].status |= IRQ_DISABLED; + irq_desc[irq].handler->shutdown(irq); + } + spin_unlock_irqrestore(&irq_controller_lock,flags); + + /* Wait to make sure it's not being used on another CPU 
*/ + while (irq_desc[irq].status & IRQ_INPROGRESS) + barrier(); + kfree(action); + return; + } + printk("Trying to free free IRQ%d\n",irq); + spin_unlock_irqrestore(&irq_controller_lock,flags); return; } - printk("Trying to free free IRQ%d\n",irq); } int get_irq_list(char *buf) { - int i; + int i, j; struct irqaction * action; char *p = buf; @@ -335,6 +453,8 @@ p += sprintf(p, " "); for (i = 0; i < smp_num_cpus; i++) p += sprintf(p, "CPU%d ", i); + for (i = 0; i < smp_num_cpus; i++) + p += sprintf(p, "TRY%d ", i); *p++ = '\n'; #endif @@ -346,13 +466,14 @@ #ifndef __SMP__ p += sprintf(p, "%10u ", kstat_irqs(i)); #else - { - int j; - for (j = 0; j < smp_num_cpus; j++) - p += sprintf(p, "%10u ", - kstat.irqs[cpu_logical_map(j)][i]); - } + for (j = 0; j < smp_num_cpus; j++) + p += sprintf(p, "%10u ", + kstat.irqs[cpu_logical_map(j)][i]); + for (j = 0; j < smp_num_cpus; j++) + p += sprintf(p, "%10lu ", + irq_attempt(cpu_logical_map(j), i)); #endif + p += sprintf(p, " %14s", irq_desc[i].handler->typename); p += sprintf(p, " %c%s", (action->flags & SA_INTERRUPT)?'+':' ', action->name); @@ -364,6 +485,13 @@ } *p++ = '\n'; } +#if CONFIG_SMP + p += sprintf(p, "LOC: "); + for (j = 0; j < smp_num_cpus; j++) + p += sprintf(p, "%10lu ", + cpu_data[cpu_logical_map(j)].smp_local_irq_count); + p += sprintf(p, "\n"); +#endif return p - buf; } @@ -638,139 +766,157 @@ } #endif /* __SMP__ */ -static void -unexpected_irq(int irq, struct pt_regs * regs) -{ -#if 0 -#if 1 - printk("device_interrupt: unexpected interrupt %d\n", irq); -#else - struct irqaction *action; - int i; - - printk("IO device interrupt, irq = %d\n", irq); - printk("PC = %016lx PS=%04lx\n", regs->pc, regs->ps); - printk("Expecting: "); - for (i = 0; i < ACTUAL_NR_IRQS; i++) - if ((action = irq_desc[i].action)) - while (action->handler) { - printk("[%s:%d] ", action->name, i); - action = action->next; - } - printk("\n"); -#endif -#endif - -#if defined(CONFIG_ALPHA_JENSEN) - /* ??? Is all this just debugging, or are the inb's and outb's - necessary to make things work? */ - printk("64=%02x, 60=%02x, 3fa=%02x 2fa=%02x\n", - inb(0x64), inb(0x60), inb(0x3fa), inb(0x2fa)); - outb(0x0c, 0x3fc); - outb(0x0c, 0x2fc); - outb(0,0x61); - outb(0,0x461); -#endif -} - +/* + * do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ void -handle_irq(int irq, int ack, struct pt_regs * regs) -{ - struct irqaction * action; +handle_irq(int irq, struct pt_regs * regs) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ int cpu = smp_processor_id(); + irq_desc_t *desc; + struct irqaction * action; + unsigned int status; if ((unsigned) irq > ACTUAL_NR_IRQS) { printk("device_interrupt: illegal interrupt %d\n", irq); return; } -#if 0 - /* A useful bit of code to find out if an interrupt is going wild. 
*/ - { - static unsigned int last_msg, last_cc; - static int last_irq, count; - unsigned int cc; - - __asm __volatile("rpcc %0" : "=r"(cc)); - ++count; - if (cc - last_msg > 150000000 || irq != last_irq) { - printk("handle_irq: irq %d count %d cc %u @ %p\n", - irq, count, cc-last_cc, regs->pc); - count = 0; - last_msg = cc; - last_irq = irq; - } - last_cc = cc; + irq_attempt(cpu, irq)++; + desc = irq_desc + irq; + spin_lock_irq(&irq_controller_lock); /* mask also the RTC */ + desc->handler->ack(irq); + /* + REPLAY is when Linux resends an IRQ that was dropped earlier + WAITING is used by probe to mark irqs that are being tested + */ + status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); + status |= IRQ_PENDING; /* we _want_ to handle it */ + + /* + * If the IRQ is disabled for whatever reason, we cannot + * use the action we have. + */ + action = NULL; + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + action = desc->action; + status &= ~IRQ_PENDING; /* we commit to handling */ + status |= IRQ_INPROGRESS; /* we are handling it */ } -#endif + desc->status = status; + spin_unlock(&irq_controller_lock); - irq_enter(cpu, irq); - kstat.irqs[cpu][irq] += 1; - action = irq_desc[irq].action; + /* + * If there is no IRQ handler or it was disabled, exit early. + Since we set PENDING, if another processor is handling + a different instance of this same irq, the other processor + will take care of it. + */ + if (!action) + return; /* - * For normal interrupts, we mask it out, and then ACK it. - * This way another (more timing-critical) interrupt can - * come through while we're doing this one. - * - * Note! An irq without a handler gets masked and acked, but - * never unmasked. The autoirq stuff depends on this (it looks - * at the masks before and after doing the probing). - */ - if (ack >= 0) { - mask_irq(ack); - alpha_mv.ack_irq(ack); - } - if (action) { - if (action->flags & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - do { - action->handler(irq, action->dev_id, regs); - action = action->next; - } while (action); - if (ack >= 0) - unmask_irq(ack); - } else { - unexpected_irq(irq, regs); + * Edge triggered interrupts need to remember + * pending events. + * This applies to any hw interrupts that allow a second + * instance of the same irq to arrive while we are in do_IRQ + * or in the handler. But the code here only handles the _second_ + * instance of the irq, not the third or fourth. So it is mostly + * useful for irq hardware that does not mask cleanly in an + * SMP environment. + */ + for (;;) { + handle_IRQ_event(irq, regs, action); + spin_lock(&irq_controller_lock); + + if (!(desc->status & IRQ_PENDING) + || (desc->status & IRQ_LEVEL)) + break; + desc->status &= ~IRQ_PENDING; + spin_unlock(&irq_controller_lock); } - irq_exit(cpu, irq); + desc->status &= ~IRQ_INPROGRESS; + if (!(desc->status & IRQ_DISABLED)) + desc->handler->end(irq); + spin_unlock(&irq_controller_lock); } - /* - * Start listening for interrupts.. + * IRQ autodetection code.. + * + * This depends on the fact that any interrupt that + * comes in on to an unassigned handler will get stuck + * with "IRQ_WAITING" cleared and the interrupt + * disabled. */ - unsigned long probe_irq_on(void) { - struct irqaction * action; - unsigned long irqs = 0; - unsigned long delay; unsigned int i; + unsigned long delay; - /* Handle only the first 64 IRQs here. This is enough for - [E]ISA, which is the only thing that needs probing anyway. 
*/ - for (i = (ACTUAL_NR_IRQS - 1) & 63; i > 0; i--) { - if (!(PROBE_MASK & (1UL << i))) { - continue; - } - action = irq_desc[i].action; - if (!action) { - enable_irq(i); - irqs |= (1UL << i); + /* Something may have generated an irq long ago and we want to + flush such a longstanding irq before considering it as spurious. */ + spin_lock_irq(&irq_controller_lock); + for (i = NR_IRQS-1; i > 0; i--) + if (!irq_desc[i].action) + irq_desc[i].handler->startup(i); + spin_unlock_irq(&irq_controller_lock); + + /* Wait for longstanding interrupts to trigger. */ + for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) + /* about 20ms delay */ synchronize_irq(); + + /* enable any unassigned irqs (we must startup again here because + if a longstanding irq happened in the previous stage, it may have + masked itself) first, enable any unassigned irqs. */ + spin_lock_irq(&irq_controller_lock); + for (i = NR_IRQS-1; i > 0; i--) { + if (!irq_desc[i].action) { + irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING; + if(irq_desc[i].handler->startup(i)) + irq_desc[i].status |= IRQ_PENDING; } } + spin_unlock_irq(&irq_controller_lock); /* - * Wait about 100ms for spurious interrupts to mask themselves - * out again... + * Wait for spurious interrupts to trigger */ - for (delay = jiffies + HZ/10; time_before(jiffies, delay); ) - barrier(); + for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) + /* about 100ms delay */ synchronize_irq(); + + /* + * Now filter out any obviously spurious interrupts + */ + spin_lock_irq(&irq_controller_lock); + for (i=0; i<NR_IRQS; i++) { + unsigned int status = irq_desc[i].status; + + if (!(status & IRQ_AUTODETECT)) + continue; + + /* It triggered already - consider it spurious. */ + if (!(status & IRQ_WAITING)) { + irq_desc[i].status = status & ~IRQ_AUTODETECT; + irq_desc[i].handler->shutdown(i); + } + } + spin_unlock_irq(&irq_controller_lock); + + return 0x12345678; } /* @@ -780,19 +926,35 @@ */ int -probe_irq_off(unsigned long irqs) +probe_irq_off(unsigned long unused) { - int i; - - /* Handle only the first 64 IRQs here. This is enough for - [E]ISA, which is the only thing that needs probing anyway. */ - irqs &= alpha_irq_mask; - if (!irqs) - return 0; - i = ffz(~irqs); - if (irqs != (1UL << i)) - i = -i; - return i; + int i, irq_found, nr_irqs; + + if (unused != 0x12345678) + printk("Bad IRQ probe from %lx\n", (&unused)[-1]); + + nr_irqs = 0; + irq_found = 0; + spin_lock_irq(&irq_controller_lock); + for (i=0; i<NR_IRQS; i++) { + unsigned int status = irq_desc[i].status; + + if (!(status & IRQ_AUTODETECT)) + continue; + + if (!(status & IRQ_WAITING)) { + if (!nr_irqs) + irq_found = i; + nr_irqs++; + } + + irq_desc[i].status = status & ~IRQ_AUTODETECT; + irq_desc[i].handler->shutdown(i); + } + spin_unlock_irq(&irq_controller_lock); + + if (nr_irqs > 1) + irq_found = -irq_found; + return irq_found; } @@ -815,7 +977,12 @@ #endif break; case 1: - handle_irq(RTC_IRQ, -1, &regs); +#ifdef __SMP__ + cpu_data[smp_processor_id()].smp_local_irq_count++; + smp_percpu_timer_interrupt(&regs); + if (smp_processor_id() == smp_boot_cpuid) +#endif + handle_irq(RTC_IRQ, &regs); return; case 2: alpha_mv.machine_check(vector, la_ptr, &regs); diff -urN 2.3.42/arch/alpha/kernel/process.c 2.3.42-alpha-irq/arch/alpha/kernel/process.c --- 2.3.42/arch/alpha/kernel/process.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/process.c Thu Feb 3 20:12:30 2000 @@ -30,9 +30,11 @@ #include #include +#if 0 #ifdef CONFIG_RTC #include #endif +#endif #include #include @@ -139,9 +141,11 @@ #endif } +#if 0 #ifdef CONFIG_RTC /* Reset rtc to defaults. 
*/ rtc_kill_pit(); +#endif #endif if (alpha_mv.kill_arch) diff -urN 2.3.42/arch/alpha/kernel/rtc_irq.c 2.3.42-alpha-irq/arch/alpha/kernel/rtc_irq.c --- 2.3.42/arch/alpha/kernel/rtc_irq.c Thu Jan 1 01:00:00 1970 +++ 2.3.42-alpha-irq/arch/alpha/kernel/rtc_irq.c Thu Feb 3 20:12:30 2000 @@ -0,0 +1,26 @@ +/* RTC irq callbacks, 1999 Andrea Arcangeli */ + +#include +#include +#include + +static void enable_rtc(unsigned int irq) { } +static unsigned int startup_rtc(unsigned int irq) { return 0; } +#define shutdown_rtc enable_rtc +#define end_rtc enable_rtc +#define ack_rtc enable_rtc +#define disable_rtc enable_rtc + +void __init +init_RTC_irq(void) +{ + static struct hw_interrupt_type rtc_irq_type = { "RTC", + startup_rtc, + shutdown_rtc, + enable_rtc, + disable_rtc, + ack_rtc, + end_rtc }; + irq_desc[RTC_IRQ].status = IRQ_DISABLED; + irq_desc[RTC_IRQ].handler = &rtc_irq_type; +} diff -urN 2.3.42/arch/alpha/kernel/setup.c 2.3.42-alpha-irq/arch/alpha/kernel/setup.c --- 2.3.42/arch/alpha/kernel/setup.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/setup.c Thu Feb 3 20:12:30 2000 @@ -30,9 +30,11 @@ #include #include +#if 0 #ifdef CONFIG_RTC #include #endif +#endif #ifdef CONFIG_BLK_DEV_INITRD #include #endif @@ -462,6 +464,7 @@ /* Reserve standard resources. */ reserve_std_resources(); +#if 0 /* Initialize the timers. */ /* ??? There is some circumstantial evidence that this needs to be done now rather than later in time_init, which would @@ -470,6 +473,7 @@ rtc_init_pit(); #else alpha_mv.init_pit(); +#endif #endif /* diff -urN 2.3.42/arch/alpha/kernel/smp.c 2.3.42-alpha-irq/arch/alpha/kernel/smp.c --- 2.3.42/arch/alpha/kernel/smp.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/smp.c Thu Feb 3 20:12:30 2000 @@ -62,6 +62,7 @@ static unsigned long smp_secondary_alive; unsigned long cpu_present_mask; /* Which cpus ids came online. */ +static unsigned long __cpu_present_mask __initdata = 0; /* cpu reported in the hwrpb */ static int max_cpus = -1; /* Command-line limitation. */ int smp_boot_cpuid; /* Which processor we booted from. */ @@ -506,7 +507,7 @@ if ((cpu->flags & 0x1cc) == 0x1cc) { smp_num_probed++; /* Assume here that "whami" == index */ - cpu_present_mask |= (1L << i); + __cpu_present_mask |= (1L << i); cpu->pal_revision = boot_cpu_palrev; } @@ -517,11 +518,12 @@ } } else { smp_num_probed = 1; - cpu_present_mask = (1L << smp_boot_cpuid); + __cpu_present_mask = (1L << smp_boot_cpuid); } + cpu_present_mask = 1L << smp_boot_cpuid; printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", - smp_num_probed, cpu_present_mask); + smp_num_probed, __cpu_present_mask); } /* @@ -565,13 +567,14 @@ if (i == smp_boot_cpuid) continue; - if (((cpu_present_mask >> i) & 1) == 0) + if (((__cpu_present_mask >> i) & 1) == 0) continue; if (smp_boot_one_cpu(i, cpu_count)) continue; cpu_count++; + cpu_present_mask |= 1L << i; } if (cpu_count == 1) { diff -urN 2.3.42/arch/alpha/kernel/sys_dp264.c 2.3.42-alpha-irq/arch/alpha/kernel/sys_dp264.c --- 2.3.42/arch/alpha/kernel/sys_dp264.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/sys_dp264.c Thu Feb 3 20:12:30 2000 @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -36,60 +37,158 @@ * HACK ALERT! only the boot cpu is used for interrupts. 
*/ +static void enable_tsunami_irq(unsigned int irq); +static void disable_tsunami_irq(unsigned int irq); +static void enable_clipper_irq(unsigned int irq); +static void disable_clipper_irq(unsigned int irq); + +#define end_tsunami_irq enable_tsunami_irq +#define shutdown_tsunami_irq disable_tsunami_irq +#define mask_and_ack_tsunami_irq disable_tsunami_irq + +#define end_clipper_irq enable_clipper_irq +#define shutdown_clipper_irq disable_clipper_irq +#define mask_and_ack_clipper_irq disable_clipper_irq + + +static unsigned int +startup_tsunami_irq(unsigned int irq) +{ + enable_tsunami_irq(irq); + return 0; /* never anything pending */ +} + +static unsigned int +startup_clipper_irq(unsigned int irq) +{ + enable_clipper_irq(irq); + return 0; /* never anything pending */ +} + +static struct hw_interrupt_type tsunami_irq_type = { + "TSUNAMI", + startup_tsunami_irq, + shutdown_tsunami_irq, + enable_tsunami_irq, + disable_tsunami_irq, + mask_and_ack_tsunami_irq, + end_tsunami_irq +}; + +static struct hw_interrupt_type clipper_irq_type = { + "CLIPPER", + startup_clipper_irq, + shutdown_clipper_irq, + enable_clipper_irq, + disable_clipper_irq, + mask_and_ack_clipper_irq, + end_clipper_irq +}; + +static unsigned long cached_irq_mask = ~0UL; + +#define TSUNAMI_SET_IRQ_MASK(cpu, value) \ +do { \ + volatile unsigned long *csr; \ + \ + csr = &TSUNAMI_cchip->dim##cpu##.csr; \ + *csr = (value); \ + mb(); \ + *csr; \ +} while(0) + +static inline void +do_flush_irq_mask(unsigned long value) +{ + switch (TSUNAMI_bootcpu) + { + case 0: + TSUNAMI_SET_IRQ_MASK(0, value); + break; + case 1: + TSUNAMI_SET_IRQ_MASK(1, value); + break; + case 2: + TSUNAMI_SET_IRQ_MASK(2, value); + break; + case 3: + TSUNAMI_SET_IRQ_MASK(3, value); + break; + } +} + +#ifdef CONFIG_SMP +static inline void +do_flush_smp_irq_mask(unsigned long value) +{ + extern unsigned long cpu_present_mask; + unsigned long other_cpus = cpu_present_mask & ~(1L << TSUNAMI_bootcpu); + + if (other_cpus & 1) + TSUNAMI_SET_IRQ_MASK(0, value); + if (other_cpus & 2) + TSUNAMI_SET_IRQ_MASK(1, value); + if (other_cpus & 4) + TSUNAMI_SET_IRQ_MASK(2, value); + if (other_cpus & 8) + TSUNAMI_SET_IRQ_MASK(3, value); +} +#endif + static void -dp264_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p) +dp264_flush_irq_mask(unsigned long mask) { - volatile unsigned long *csr; + unsigned long value; - if (TSUNAMI_bootcpu < 2) { - if (!TSUNAMI_bootcpu) - csr = &TSUNAMI_cchip->dim0.csr; - else - csr = &TSUNAMI_cchip->dim1.csr; - } else { - if (TSUNAMI_bootcpu == 2) - csr = &TSUNAMI_cchip->dim2.csr; - else - csr = &TSUNAMI_cchip->dim3.csr; - } +#ifdef CONFIG_SMP + value = ~mask; + do_flush_smp_irq_mask(value); +#endif - *csr = ~mask; - mb(); - *csr; - - if (irq < 16) { - if (irq >= 8) - outb(mask >> 8, 0xA1); /* ISA PIC2 */ - else - outb(mask, 0x21); /* ISA PIC1 */ - } + value = ~mask | (1UL << 55) | 0xffff; /* isa irqs always enabled */ + do_flush_irq_mask(value); } static void -clipper_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p) +enable_tsunami_irq(unsigned int irq) { - if (irq >= 16) { - volatile unsigned long *csr; + cached_irq_mask &= ~(1UL << irq); + dp264_flush_irq_mask(cached_irq_mask); +} - if (TSUNAMI_bootcpu < 2) - if (!TSUNAMI_bootcpu) - csr = &TSUNAMI_cchip->dim0.csr; - else - csr = &TSUNAMI_cchip->dim1.csr; - else - if (TSUNAMI_bootcpu == 2) - csr = &TSUNAMI_cchip->dim2.csr; - else - csr = &TSUNAMI_cchip->dim3.csr; - - *csr = (~mask >> 16) | (1UL << 55); /* master ISA enable */ - mb(); - *csr; - } - else if (irq >= 8) 
- outb(mask >> 8, 0xA1); /* ISA PIC2 */ - else - outb(mask, 0x21); /* ISA PIC1 */ +static void +disable_tsunami_irq(unsigned int irq) +{ + cached_irq_mask |= 1UL << irq; + dp264_flush_irq_mask(cached_irq_mask); +} + +static void +clipper_flush_irq_mask(unsigned long mask) +{ + unsigned long value; + +#ifdef CONFIG_SMP + value = ~mask >> 16; + do_flush_smp_irq_mask(value); +#endif + + value = (~mask >> 16) | (1UL << 55); /* master ISA enable */ + do_flush_irq_mask(value); +} + +static void +enable_clipper_irq(unsigned int irq) +{ + cached_irq_mask &= ~(1UL << irq); + clipper_flush_irq_mask(cached_irq_mask); +} + +static void +disable_clipper_irq(unsigned int irq) +{ + cached_irq_mask |= 1UL << irq; + clipper_flush_irq_mask(cached_irq_mask); } static void @@ -126,9 +225,9 @@ static void dp264_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) { - int irq, ack; + int irq; - ack = irq = (vector - 0x800) >> 4; + irq = (vector - 0x800) >> 4; /* * The SRM console reports PCI interrupts with a vector calculated by: @@ -142,17 +241,17 @@ * so we don't count them. */ if (irq >= 32) - ack = irq = irq - 16; + irq -= 16; - handle_irq(irq, ack, regs); + handle_irq(irq, regs); } static void clipper_srm_device_interrupt(unsigned long vector, struct pt_regs * regs) { - int irq, ack; + int irq; - ack = irq = (vector - 0x800) >> 4; + irq = (vector - 0x800) >> 4; /* * The SRM console reports PCI interrupts with a vector calculated by: @@ -166,7 +265,22 @@ * * Eg IRQ 24 is DRIR bit 8, etc, etc */ - handle_irq(irq, ack, regs); + handle_irq(irq, regs); +} + +static void __init +init_TSUNAMI_irqs(struct hw_interrupt_type * ops) +{ + int i; + + for (i = 0; i < NR_IRQS; i++) { + if (i == RTC_IRQ) + continue; + if (i < 16) + continue; + irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL; + irq_desc[i].handler = ops; + } } static void __init @@ -180,10 +294,11 @@ if (alpha_using_srm) alpha_mv.device_interrupt = dp264_srm_device_interrupt; - dp264_update_irq_hw(16, alpha_irq_mask, 0); + init_ISA_irqs(); + init_RTC_irq(); + init_TSUNAMI_irqs(&tsunami_irq_type); - enable_irq(55); /* Enable ISA interrupt controller. */ - enable_irq(2); + dp264_flush_irq_mask(~0UL); } static void __init @@ -197,10 +312,11 @@ if (alpha_using_srm) alpha_mv.device_interrupt = clipper_srm_device_interrupt; - clipper_update_irq_hw(16, alpha_irq_mask, 0); + init_ISA_irqs(); + init_RTC_irq(); + init_TSUNAMI_irqs(&clipper_irq_type); - enable_irq(55); /* Enable ISA interrupt controller. 
*/ - enable_irq(2); + clipper_flush_irq_mask(~0UL); } @@ -431,9 +547,6 @@ min_mem_address: DEFAULT_MEM_BASE, nr_irqs: 64, - irq_probe_mask: TSUNAMI_PROBE_MASK, - update_irq_hw: dp264_update_irq_hw, - ack_irq: common_ack_irq, device_interrupt: dp264_device_interrupt, init_arch: tsunami_init_arch, @@ -458,9 +571,6 @@ min_mem_address: DEFAULT_MEM_BASE, nr_irqs: 64, - irq_probe_mask: TSUNAMI_PROBE_MASK, - update_irq_hw: dp264_update_irq_hw, - ack_irq: common_ack_irq, device_interrupt: dp264_device_interrupt, init_arch: tsunami_init_arch, @@ -484,9 +594,6 @@ min_mem_address: DEFAULT_MEM_BASE, nr_irqs: 64, - irq_probe_mask: TSUNAMI_PROBE_MASK, - update_irq_hw: dp264_update_irq_hw, - ack_irq: common_ack_irq, device_interrupt: dp264_device_interrupt, init_arch: tsunami_init_arch, @@ -510,9 +617,6 @@ min_mem_address: DEFAULT_MEM_BASE, nr_irqs: 64, - irq_probe_mask: TSUNAMI_PROBE_MASK, - update_irq_hw: clipper_update_irq_hw, - ack_irq: common_ack_irq, device_interrupt: dp264_device_interrupt, init_arch: tsunami_init_arch, diff -urN 2.3.42/arch/alpha/kernel/sys_sx164.c 2.3.42-alpha-irq/arch/alpha/kernel/sys_sx164.c --- 2.3.42/arch/alpha/kernel/sys_sx164.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/sys_sx164.c Thu Feb 3 20:12:30 2000 @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include @@ -26,47 +28,83 @@ #include #include "proto.h" -#include #include "pci_impl.h" #include "machvec_impl.h" +/* Note invert on MASK bits. */ +static unsigned long cached_irq_mask; + +static inline void +sx164_change_irq_mask(unsigned long mask) +{ + *(vulp)PYXIS_INT_MASK = mask; + mb(); + *(vulp)PYXIS_INT_MASK; +} + +static inline void +sx164_enable_irq(unsigned int irq) +{ + sx164_change_irq_mask(cached_irq_mask |= 1UL << (irq - 16)); +} + static void -sx164_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p) +sx164_disable_irq(unsigned int irq) { - if (irq >= 16) { - /* Make CERTAIN none of the bogus ints get enabled */ - *(vulp)PYXIS_INT_MASK = - ~((long)mask >> 16) & ~0x000000000000003bUL; - mb(); - /* ... and read it back to make sure it got written. 
*/ - *(vulp)PYXIS_INT_MASK; - } - else if (irq >= 8) - outb(mask >> 8, 0xA1); /* ISA PIC2 */ - else - outb(mask, 0x21); /* ISA PIC1 */ + sx164_change_irq_mask(cached_irq_mask &= ~(1UL << (irq - 16))); +} + +static unsigned int +sx164_startup_irq(unsigned int irq) +{ + sx164_enable_irq(irq); + return 0; +} + +static inline void +sx164_srm_enable_irq(unsigned int irq) +{ + cserve_ena(irq - 16); } static void -sx164_srm_update_irq_hw(unsigned long irq, unsigned long mask, int unmask_p) +sx164_srm_disable_irq(unsigned int irq) { - if (irq >= 16) { - if (unmask_p) - cserve_ena(irq - 16); - else - cserve_dis(irq - 16); - } - else if (irq >= 8) - outb(mask >> 8, 0xA1); /* ISA PIC2 */ - else - outb(mask, 0x21); /* ISA PIC1 */ + cserve_dis(irq - 16); } +static unsigned int +sx164_srm_startup_irq(unsigned int irq) +{ + sx164_srm_enable_irq(irq); + return 0; +} + +static struct hw_interrupt_type sx164_irq_type = { + typename: "SX164", + startup: sx164_startup_irq, + shutdown: sx164_disable_irq, + enable: sx164_enable_irq, + disable: sx164_disable_irq, + ack: sx164_disable_irq, + end: sx164_enable_irq, +}; + +static struct hw_interrupt_type sx164_srm_irq_type = { + typename: "SX164-SRM", + startup: sx164_srm_startup_irq, + shutdown: sx164_srm_disable_irq, + enable: sx164_srm_enable_irq, + disable: sx164_srm_disable_irq, + ack: sx164_srm_disable_irq, + end: sx164_srm_enable_irq, +}; + static void sx164_device_interrupt(unsigned long vector, struct pt_regs *regs) { - unsigned long pld, tmp; + unsigned long pld; unsigned int i; /* Read the interrupt summary register of PYXIS */ @@ -93,35 +131,48 @@ continue; } else { /* if not timer int */ - handle_irq(16 + i, 16 + i, regs); + handle_irq(16 + i, regs); } - *(vulp)PYXIS_INT_REQ = 1UL << i; mb(); - tmp = *(vulp)PYXIS_INT_REQ; + + *(vulp)PYXIS_INT_REQ = 1UL << i; + mb(); + *(vulp)PYXIS_INT_REQ; } } static void sx164_init_irq(void) { + struct hw_interrupt_type *ops; + long i; + outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); + init_ISA_irqs(); + init_RTC_irq(); + if (alpha_using_srm) { - alpha_mv.update_irq_hw = sx164_srm_update_irq_hw; alpha_mv.device_interrupt = srm_device_interrupt; + ops = &sx164_srm_irq_type; } else { - /* Note invert on MASK bits. */ - *(vulp)PYXIS_INT_MASK = ~((long)alpha_irq_mask >> 16); - mb(); - *(vulp)PYXIS_INT_MASK; + sx164_change_irq_mask(0); + ops = &sx164_irq_type; + } + + for (i = 16; i < 40; ++i) { + /* Make CERTAIN none of the bogus ints get enabled. */ + if ((0x3b0000 >> i) & 1) + continue; + irq_desc[i].status = IRQ_DISABLED; + irq_desc[i].handler = ops; } - enable_irq(16 + 6); /* enable timer */ - enable_irq(16 + 7); /* enable ISA PIC cascade */ - enable_irq(2); /* enable cascade */ + ops->startup(16 + 6); /* enable timer */ + ops->startup(16 + 7); /* enable ISA PIC cascade */ } /* @@ -202,9 +253,6 @@ min_mem_address: DEFAULT_MEM_BASE, nr_irqs: 40, - irq_probe_mask: _PROBE_MASK(40), - update_irq_hw: sx164_update_irq_hw, - ack_irq: common_ack_irq, device_interrupt: sx164_device_interrupt, init_arch: pyxis_init_arch, diff -urN 2.3.42/arch/alpha/kernel/time.c 2.3.42-alpha-irq/arch/alpha/kernel/time.c --- 2.3.42/arch/alpha/kernel/time.c Wed Dec 8 00:05:25 1999 +++ 2.3.42-alpha-irq/arch/alpha/kernel/time.c Thu Feb 3 20:12:30 2000 @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include @@ -88,13 +90,7 @@ __u32 now; long nticks; -#ifdef __SMP__ - /* When SMP, do this for *all* CPUs, but only do the rest for - the boot CPU. 
*/ - smp_percpu_timer_interrupt(regs); - if (smp_processor_id() != smp_boot_cpuid) - return; -#else +#ifndef __SMP__ /* Not SMP, do kernel PC profiling here. */ if (!user_mode(regs)) alpha_do_profile(regs->pc); @@ -167,6 +163,7 @@ )*60 + sec; /* finally seconds */ } +#if 0 /* * Initialize Programmable Interval Timers with standard values. Some * drivers depend on them being initialized (e.g., joystick driver). @@ -213,6 +210,7 @@ sti(); } #endif +#endif void common_init_pit (void) @@ -248,10 +246,15 @@ void time_init(void) { - void (*irq_handler)(int, void *, struct pt_regs *); unsigned int year, mon, day, hour, min, sec, cc1, cc2; unsigned long cycle_freq, one_percent; long diff; + static struct irqaction timer_irqaction = { timer_interrupt, + SA_INTERRUPT, 0, "timer", + NULL, NULL}; + + /* Startup the timer source. */ + alpha_mv.init_pit(); /* * The Linux interpretation of the CMOS clock register contents: @@ -337,9 +340,7 @@ state.partial_tick = 0L; /* setup timer */ - irq_handler = timer_interrupt; - if (request_irq(TIMER_IRQ, irq_handler, 0, "timer", NULL)) - panic("Could not allocate timer IRQ!"); + setup_irq(TIMER_IRQ, &timer_irqaction); } /* diff -urN 2.3.42/drivers/char/rtc.c 2.3.42-alpha-irq/drivers/char/rtc.c --- 2.3.42/drivers/char/rtc.c Sun Jan 30 15:43:37 2000 +++ 2.3.42-alpha-irq/drivers/char/rtc.c Thu Feb 3 20:14:06 2000 @@ -129,6 +129,7 @@ static const unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; +#ifndef __alpha__ /* * A very tiny interrupt handler. It runs with SA_INTERRUPT set, * so that there is no possibility of conflicting with the @@ -156,6 +157,7 @@ if (atomic_read(&rtc_status) & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); } +#endif /* * Now all the various file operations that we export. @@ -169,6 +171,9 @@ static ssize_t rtc_read(struct file *file, char *buf, size_t count, loff_t *ppos) { +#ifdef __alpha__ + return -EIO; +#else DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t retval; @@ -200,6 +205,7 @@ remove_wait_queue(&rtc_wait, &wait); return retval; +#endif } static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, @@ -210,6 +216,7 @@ struct rtc_time wtime; switch (cmd) { +#ifndef __alpha__ case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ { mask_rtc_irq_bit(RTC_AIE); @@ -259,6 +266,7 @@ set_rtc_irq_bit(RTC_UIE); return 0; } +#endif case RTC_ALM_READ: /* Read the present alarm time */ { /* @@ -392,6 +400,7 @@ spin_unlock_irqrestore(&rtc_lock, flags); return 0; } +#ifndef __alpha__ case RTC_IRQP_READ: /* Read the periodic IRQ rate. */ { return put_user(rtc_freq, (unsigned long *)arg); @@ -431,7 +440,7 @@ spin_unlock_irqrestore(&rtc_lock, flags); return 0; } -#ifdef __alpha__ +#else case RTC_EPOCH_READ: /* Read the epoch. */ { return put_user (epoch, (unsigned long *)arg); @@ -482,6 +491,7 @@ static int rtc_release(struct inode *inode, struct file *file) { +#ifndef __alpha__ /* * Turn off all interrupts once the device is no longer * in use, and clear the data. @@ -504,6 +514,7 @@ del_timer(&rtc_irq_timer); } +#endif MOD_DEC_USE_COUNT; spin_lock_irqsave (&rtc_lock, flags); @@ -513,6 +524,7 @@ return 0; } +#ifndef __alpha__ static unsigned int rtc_poll(struct file *file, poll_table *wait) { unsigned long l, flags; @@ -527,6 +539,7 @@ return POLLIN | POLLRDNORM; return 0; } +#endif /* * The various file operations we support. 
@@ -537,7 +550,11 @@ rtc_read, NULL, /* No write */ NULL, /* No readdir */ +#ifdef __alpha__ + NULL, +#else rtc_poll, +#endif rtc_ioctl, NULL, /* No mmap */ rtc_open, @@ -599,12 +616,14 @@ return -EIO; } +#ifndef __alpha__ if(request_irq(RTC_IRQ, rtc_interrupt, SA_INTERRUPT, "rtc", NULL)) { /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); return -EIO; } +#endif request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc"); #endif /* __sparc__ vs. others */ @@ -641,12 +660,14 @@ if (guess) printk("rtc: %s epoch (%lu) detected\n", guess, epoch); #endif +#ifndef __alpha__ init_timer(&rtc_irq_timer); rtc_irq_timer.function = rtc_dropped_irq; spin_lock_irqsave(&rtc_lock, flags); /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); +#endif rtc_freq = 1024; printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n"); @@ -676,6 +697,7 @@ module_exit(rtc_exit); EXPORT_NO_SYMBOLS; +#ifndef __alpha__ /* * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. * (usually during an IDE disk interrupt, with IRQ unmasking off) @@ -701,6 +723,7 @@ rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ spin_unlock_irqrestore(&rtc_lock, flags); } +#endif /* * Info exported via "/proc/driver/rtc". @@ -889,6 +912,7 @@ } } +#ifndef __alpha__ /* * Used to disable/enable interrupts for any one of UIE, AIE, PIE. * Rumour has it that if you frob the interrupt enable/disable @@ -926,3 +950,4 @@ rtc_irq_data = 0; spin_unlock_irqrestore(&rtc_lock, flags); } +#endif diff -urN 2.3.42/include/asm-alpha/hardirq.h 2.3.42-alpha-irq/include/asm-alpha/hardirq.h --- 2.3.42/include/asm-alpha/hardirq.h Tue Jan 18 01:11:24 2000 +++ 2.3.42-alpha-irq/include/asm-alpha/hardirq.h Thu Feb 3 20:12:30 2000 @@ -8,8 +8,11 @@ #ifndef __SMP__ extern int __local_irq_count; #define local_irq_count(cpu) ((void)(cpu), __local_irq_count) +extern unsigned long __irq_attempt[]; +#define irq_attempt(cpu, irq) ((void)(cpu), __irq_attempt[irq]) #else #define local_irq_count(cpu) (cpu_data[cpu].irq_count) +#define irq_attempt(cpu, irq) (cpu_data[cpu].irq_attempt[irq]) #endif /* diff -urN 2.3.42/include/asm-alpha/hw_irq.h 2.3.42-alpha-irq/include/asm-alpha/hw_irq.h --- 2.3.42/include/asm-alpha/hw_irq.h Tue Jan 18 01:15:08 2000 +++ 2.3.42-alpha-irq/include/asm-alpha/hw_irq.h Thu Feb 3 20:12:30 2000 @@ -18,21 +18,22 @@ outb(0, DMA1_CLR_MASK_REG); \ outb(0, DMA2_CLR_MASK_REG) -extern unsigned long _alpha_irq_masks[2]; -#define alpha_irq_mask _alpha_irq_masks[0] - extern void common_ack_irq(unsigned long irq); extern void isa_device_interrupt(unsigned long vector, struct pt_regs * regs); extern void srm_device_interrupt(unsigned long vector, struct pt_regs * regs); -extern void handle_irq(int irq, int ack, struct pt_regs * regs); +extern void handle_irq(int irq, struct pt_regs * regs); #define RTC_IRQ 8 +#if 0 /* on Alpha we want to use only the RTC as timer for SMP issues */ #ifdef CONFIG_RTC #define TIMER_IRQ 0 /* timer is the pit */ #else #define TIMER_IRQ RTC_IRQ /* timer is the rtc */ #endif +#else +#define TIMER_IRQ RTC_IRQ /* timer is the rtc */ +#endif /* * PROBE_MASK is the bitset of irqs that we consider for autoprobing. 
@@ -71,10 +72,11 @@ #endif -extern char _stext; static inline void alpha_do_profile (unsigned long pc) { if (prof_buffer && current->pid) { + extern char _stext; + pc -= (unsigned long) &_stext; pc >>= prof_shift; /* @@ -87,5 +89,10 @@ atomic_inc((atomic_t *)&prof_buffer[pc]); } } + +static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} +extern void no_action(int cpl, void *dev_id, struct pt_regs *regs); +extern void init_ISA_irqs(void); +extern void init_RTC_irq(void); #endif diff -urN 2.3.42/include/asm-alpha/smp.h 2.3.42-alpha-irq/include/asm-alpha/smp.h --- 2.3.42/include/asm-alpha/smp.h Tue Jan 18 01:11:24 2000 +++ 2.3.42-alpha-irq/include/asm-alpha/smp.h Thu Feb 3 20:12:30 2000 @@ -20,6 +20,7 @@ #ifdef __SMP__ #include +#include struct cpuinfo_alpha { unsigned long loops_per_sec; @@ -28,6 +29,8 @@ unsigned long *pte_cache; unsigned long pgtable_cache_sz; unsigned long ipi_count; + unsigned long irq_attempt[NR_IRQS]; + unsigned long smp_local_irq_count; unsigned long prof_multiplier; unsigned long prof_counter; int irq_count, bh_count; diff -urN 2.3.42/include/linux/irq.h 2.3.42-alpha-irq/include/linux/irq.h --- 2.3.42/include/linux/irq.h Tue Jan 18 01:38:08 2000 +++ 2.3.42-alpha-irq/include/linux/irq.h Thu Feb 3 20:12:30 2000 @@ -11,6 +11,7 @@ #define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ #define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ +#define IRQ_LEVEL 64 /* IRQ level triggered */ /* * Interrupt controller descriptor. This is all we need