diff -urN linux-2.2.4/CREDITS linux/CREDITS --- linux-2.2.4/CREDITS Tue Mar 23 06:18:17 1999 +++ linux/CREDITS Wed Mar 24 10:22:53 1999 @@ -256,6 +256,10 @@ D: Configuration help text support D: Linux CD and Support Giveaway List +N: Zoltan Boszormenyi +E: zboszor@mol.hu +D: MTRR emulation with Cyrix style ARR registers + N: John Boyd E: boyd@cis.ohio-state.edu D: Co-author of wd7000 SCSI driver diff -urN linux-2.2.4/Documentation/Configure.help linux/Documentation/Configure.help --- linux-2.2.4/Documentation/Configure.help Mon Mar 22 02:22:00 1999 +++ linux/Documentation/Configure.help Wed Mar 24 10:22:53 1999 @@ -8469,17 +8469,24 @@ MTRR control and configuration CONFIG_MTRR - On Intel Pentium Pro and Pentium II systems the Memory Type Range - Registers (MTRRs) may be used to control processor access to memory - ranges. This is most useful when you have a video (VGA) card on a - PCI or AGP bus. Enabling write-combining allows bus write transfers - to be combined into a larger transfer before bursting over the - PCI/AGP bus. This can increase performance of image write operations - 2.5 times or more. This option creates a /proc/mtrr file which may - be used to manipulate your MTRRs. Typically the X server should use - this. This should have a reasonably generic interface so that - similar control registers on other processors can be easily - supported. + On Intel P6 family processors (Pentium Pro, Pentium II and later) + the Memory Type Range Registers (MTRRs) may be used to control + processor access to memory ranges. This is most useful when you have + a video (VGA) card on a PCI or AGP bus. Enabling write-combining + allows bus write transfers to be combined into a larger transfer + before bursting over the PCI/AGP bus. This can increase performance + of image write operations 2.5 times or more. This option creates a + /proc/mtrr file which may be used to manipulate your + MTRRs. Typically the X server should use this. This should have a + reasonably generic interface so that similar control registers on + other processors can be easily supported. + + The Cyrix 6x86, 6x86MX and M II processors have Address Range + Registers (ARRs) which provide a similar functionality to MTRRs. For + these, the ARRs are used to emulate the MTRRs. + + The AMD K6-2 (stepping 8 and above) and K6-3 processors have two + MTRRs. These are supported. Saying Y here also fixes a problem with buggy SMP BIOSes which only set the MTRRs for the boot CPU and not the secondary CPUs. This can diff -urN linux-2.2.4/Documentation/mtrr.txt linux/Documentation/mtrr.txt --- linux-2.2.4/Documentation/mtrr.txt Sun May 3 10:42:08 1998 +++ linux/Documentation/mtrr.txt Wed Mar 24 10:22:54 1999 @@ -62,6 +62,23 @@ ioctl() interface, so users won't have to do anything. If you use a commercial X server, lobby your vendor to add support for MTRRs. =============================================================================== +Creating overlapping MTRRs: + +%echo "base=0xfb000000 size=0x1000000 type=write-combining" >/proc/mtrr +%echo "base=0xfb000000 size=0x1000 type=uncachable" >/proc/mtrr + +And the results: cat /proc/mtrr +reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1 +reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1 +reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1 + +Some cards (especially Voodoo Graphics boards) need this 4 kB area +excluded from the beginning of the region because it is used for +registers. 
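Kernel drivers can request the same overlapping layout through the mtrr_add()
interface declared in <asm/mtrr.h> (the vesafb change later in this patch is an
in-tree caller). A minimal sketch only: fb_base and fb_size are hypothetical
values the driver has already probed, and checking of the returned register
index is omitted.

  #include <asm/mtrr.h>

  /* Cover the whole aperture with a write-combining region... */
  mtrr_add (fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
  /* ...then mark the first 4 kB (the register window) uncached,
     exactly as the /proc/mtrr example above does from the shell. */
  mtrr_add (fb_base, 0x1000, MTRR_TYPE_UNCACHABLE, 1);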
+ +NOTE: You can only create type=uncachable region, if the first +region that you created is type=write-combining. +=============================================================================== Removing MTRRs from the shell: % echo "disable=2" >! /proc/mtrr =============================================================================== diff -urN linux-2.2.4/arch/i386/kernel/i386_ksyms.c linux/arch/i386/kernel/i386_ksyms.c --- linux-2.2.4/arch/i386/kernel/i386_ksyms.c Thu Mar 11 10:07:19 1999 +++ linux/arch/i386/kernel/i386_ksyms.c Wed Mar 24 10:22:54 1999 @@ -91,7 +91,7 @@ EXPORT_SYMBOL(__global_sti); EXPORT_SYMBOL(__global_save_flags); EXPORT_SYMBOL(__global_restore_flags); -EXPORT_SYMBOL(mtrr_hook); +EXPORT_SYMBOL(smp_call_function); #endif #ifdef CONFIG_MCA diff -urN linux-2.2.4/arch/i386/kernel/irq.c linux/arch/i386/kernel/irq.c --- linux-2.2.4/arch/i386/kernel/irq.c Wed Feb 17 09:20:05 1999 +++ linux/arch/i386/kernel/irq.c Wed Mar 24 10:22:54 1999 @@ -323,7 +323,7 @@ BUILD_SMP_INTERRUPT(reschedule_interrupt) BUILD_SMP_INTERRUPT(invalidate_interrupt) BUILD_SMP_INTERRUPT(stop_cpu_interrupt) -BUILD_SMP_INTERRUPT(mtrr_interrupt) +BUILD_SMP_INTERRUPT(call_function_interrupt) BUILD_SMP_INTERRUPT(spurious_interrupt) /* @@ -1084,8 +1084,8 @@ /* self generated IPI for local APIC timer */ set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); - /* IPI for MTRR control */ - set_intr_gate(MTRR_CHANGE_VECTOR, mtrr_interrupt); + /* IPI for generic function call */ + set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); /* IPI vector for APIC spurious interrupts */ set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); diff -urN linux-2.2.4/arch/i386/kernel/irq.h linux/arch/i386/kernel/irq.h --- linux-2.2.4/arch/i386/kernel/irq.h Wed Mar 24 08:55:30 1999 +++ linux/arch/i386/kernel/irq.h Wed Mar 24 10:22:54 1999 @@ -64,7 +64,7 @@ #define INVALIDATE_TLB_VECTOR 0x31 #define STOP_CPU_VECTOR 0x40 #define LOCAL_TIMER_VECTOR 0x41 -#define MTRR_CHANGE_VECTOR 0x50 +#define CALL_FUNCTION_VECTOR 0x50 /* * First APIC vector available to drivers: (vectors 0x51-0xfe) @@ -98,7 +98,6 @@ extern int i8259A_irq_pending(unsigned int irq); extern void ack_APIC_irq(void); extern void FASTCALL(send_IPI_self(int vector)); -extern void smp_send_mtrr(void); extern void init_VISWS_APIC_irqs(void); extern void setup_IO_APIC(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); diff -urN linux-2.2.4/arch/i386/kernel/mtrr.c linux/arch/i386/kernel/mtrr.c --- linux-2.2.4/arch/i386/kernel/mtrr.c Mon Dec 28 17:45:13 1998 +++ linux/arch/i386/kernel/mtrr.c Wed Mar 24 10:22:54 1999 @@ -132,6 +132,70 @@ Fixed harmless compiler warning in include/asm-i386/mtrr.h Fixed version numbering and history for v1.23 -> v1.24. v1.26 + 19990118 Richard Gooch + PLACEHOLDER. + v1.27 + 19990123 Richard Gooch + Changed locking to spin with reschedule. + Made use of new . + v1.28 + 19990201 Zoltan Boszormenyi + Extended the driver to be able to use Cyrix style ARRs. + 19990204 Richard Gooch + Restructured Cyrix support. + v1.29 + 19990204 Zoltan Boszormenyi + Refined ARR support: enable MAPEN in set_mtrr_prepare() + and disable MAPEN in set_mtrr_done(). + 19990205 Richard Gooch + Minor cleanups. + v1.30 + 19990208 Zoltan Boszormenyi + Protect plain 6x86s (and other processors without the + Page Global Enable feature) against accessing CR4 in + set_mtrr_prepare() and set_mtrr_done(). + 19990210 Richard Gooch + Turned and into function pointers. 
+ v1.31 + 19990212 Zoltan Boszormenyi + Major rewrite of cyrix_arr_init(): do not touch ARRs, + leave them as the BIOS have set them up. + Enable usage of all 8 ARRs. + Avoid multiplications by 3 everywhere and other + code clean ups/speed ups. + 19990213 Zoltan Boszormenyi + Set up other Cyrix processors identical to the boot cpu. + Since Cyrix don't support Intel APIC, this is l'art pour l'art. + Weigh ARRs by size: + If size <= 32M is given, set up ARR# we were given. + If size > 32M is given, set up ARR7 only if it is free, + fail otherwise. + 19990214 Zoltan Boszormenyi + Also check for size >= 256K if we are to set up ARR7, + mtrr_add() returns the value it gets from set_mtrr() + 19990218 Zoltan Boszormenyi + Remove Cyrix "coma bug" workaround from here. + Moved to linux/arch/i386/kernel/setup.c and + linux/include/asm-i386/bugs.h + 19990228 Richard Gooch + Added #ifdef CONFIG_DEVFS_FS + Added MTRRIOC_KILL_ENTRY ioctl(2) + Trap for counter underflow in . + Trap for 4 MiB aligned regions for PPro, stepping <= 7. + 19990301 Richard Gooch + Created hook. + 19990305 Richard Gooch + Temporarily disable AMD support now MTRR capability flag is set. + v1.32 + 19990308 Zoltan Boszormenyi + Adjust my changes (19990212-19990218) to Richard Gooch's + latest changes. (19990228-19990305) + v1.33 + 19990309 Richard Gooch + Fixed typo in message. + 19990310 Richard Gooch + Support K6-II/III based on Alan Cox's patches. + v1.34 */ #include #include @@ -167,7 +231,7 @@ #include #include "irq.h" -#define MTRR_VERSION "1.26 (19981001)" +#define MTRR_VERSION "1.34 (19990310)" #define TRUE 1 #define FALSE 0 @@ -207,9 +271,12 @@ #ifdef __SMP__ # define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type) #else -# define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type,TRUE) +# define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \ + TRUE) #endif +#define spin_lock_reschedule(lock) while (!spin_trylock(lock)) schedule (); + #ifndef CONFIG_PROC_FS # define compute_ascii() while (0) #endif @@ -233,6 +300,7 @@ unsigned long deftype_lo; unsigned long deftype_hi; unsigned long cr4val; + unsigned long ccr3; }; /* @@ -259,23 +327,26 @@ : "c" (counter)) -/* Put the processor into a state where MTRRs can be safely set. */ -static void set_mtrr_prepare(struct set_mtrr_context *ctxt) +/* Put the processor into a state where MTRRs can be safely set */ +static void set_mtrr_prepare (struct set_mtrr_context *ctxt) { unsigned long tmp; - /* disable interrupts locally */ + /* Disable interrupts locally */ __save_flags (ctxt->flags); __cli (); - /* save value of CR4 and clear Page Global Enable (bit 7) */ - asm volatile ("movl %%cr4, %0\n\t" - "movl %0, %1\n\t" - "andb $0x7f, %b1\n\t" - "movl %1, %%cr4\n\t" - : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory"); + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return; + + /* Save value of CR4 and clear Page Global Enable (bit 7) */ + if (boot_cpu_data.x86_capability & X86_FEATURE_PGE) + asm volatile ("movl %%cr4, %0\n\t" + "movl %0, %1\n\t" + "andb $0x7f, %b1\n\t" + "movl %1, %%cr4\n\t" + : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory"); - /* disable and flush caches. Note that wbinvd flushes the TLBs as - a side-effect. */ + /* Disable and flush caches. Note that wbinvd flushes the TLBs as + a side-effect */ asm volatile ("movl %%cr0, %0\n\t" "orl $0x40000000, %0\n\t" "wbinvd\n\t" @@ -283,64 +354,108 @@ "wbinvd\n\t" : "=r" (tmp) : : "memory"); - /* disable MTRRs, and set the default type to uncached. 
*/ - rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); - wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + /* Disable MTRRs, and set the default type to uncached */ + rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); + break; + case X86_VENDOR_CYRIX: + tmp = getCx86 (CX86_CCR3); + setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10); + ctxt->ccr3 = tmp; + break; + } } /* End Function set_mtrr_prepare */ - -/* Restore the processor after a set_mtrr_prepare */ -static void set_mtrr_done(struct set_mtrr_context *ctxt) +/* Restore the processor after a set_mtrr_prepare */ +static void set_mtrr_done (struct set_mtrr_context *ctxt) { unsigned long tmp; - /* flush caches and TLBs */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + { + __restore_flags (ctxt->flags); + return; + } + + /* Flush caches and TLBs */ asm volatile ("wbinvd" : : : "memory" ); - /* restore MTRRdefType */ - wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + /* Restore MTRRdefType */ + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + break; + case X86_VENDOR_CYRIX: + setCx86 (CX86_CCR3, ctxt->ccr3); + break; + } - /* enable caches */ + /* Enable caches */ asm volatile ("movl %%cr0, %0\n\t" "andl $0xbfffffff, %0\n\t" "movl %0, %%cr0\n\t" : "=r" (tmp) : : "memory"); - /* restore value of CR4 */ - asm volatile ("movl %0, %%cr4" - : : "r" (ctxt->cr4val) : "memory"); + /* Restore value of CR4 */ + if (boot_cpu_data.x86_capability & X86_FEATURE_PGE) + asm volatile ("movl %0, %%cr4" + : : "r" (ctxt->cr4val) : "memory"); - /* re-enable interrupts locally (if enabled previously) */ + /* Re-enable interrupts locally (if enabled previously) */ __restore_flags (ctxt->flags); } /* End Function set_mtrr_done */ - -/* this function returns the number of variable MTRRs */ +/* This function returns the number of variable MTRRs */ static unsigned int get_num_var_ranges (void) { unsigned long config, dummy; - rdmsr(MTRRcap_MSR, config, dummy); - return (config & 0xff); + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + rdmsr (MTRRcap_MSR, config, dummy); + return (config & 0xff); + /*break;*/ + case X86_VENDOR_CYRIX: + /* Cyrix have 8 ARRs */ + return 8; + /*break;*/ + case X86_VENDOR_AMD: + return 2; + /*break;*/ + } + return 0; } /* End Function get_num_var_ranges */ - -/* non-zero if we have the write-combining memory type. */ +/* Returns non-zero if we have the write-combining memory type */ static int have_wrcomb (void) { unsigned long config, dummy; - rdmsr(MTRRcap_MSR, config, dummy); - return (config & (1<<10)); -} - + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + rdmsr (MTRRcap_MSR, config, dummy); + return (config & (1<<10)); + /*break;*/ + case X86_VENDOR_CYRIX: + case X86_VENDOR_AMD: + return 1; + /*break;*/ + } + return 0; +} /* End Function have_wrcomb */ -static void get_mtrr (unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type *type) +static void intel_get_mtrr (unsigned int reg, unsigned long *base, + unsigned long *size, mtrr_type *type) { unsigned long dummy, mask_lo, base_lo; - rdmsr(MTRRphysMask_MSR(reg), mask_lo, dummy); + rdmsr (MTRRphysMask_MSR(reg), mask_lo, dummy); if ((mask_lo & 0x800) == 0) { /* Invalid (i.e. free) range. 
*/ *base = 0; @@ -364,11 +479,104 @@ *base = (base_lo & 0xfffff000UL); *type = (base_lo & 0xff); -} /* End Function get_mtrr */ +} /* End Function intel_get_mtrr */ +static void cyrix_get_arr (unsigned int reg, unsigned long *base, + unsigned long *size, mtrr_type *type) +{ + unsigned long flags; + unsigned char arr, ccr3, rcr, shift; + + arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ + + /* Save flags and disable interrupts */ + __save_flags (flags); __cli (); + + ccr3 = getCx86 (CX86_CCR3); + setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ + ((unsigned char *) base)[3] = getCx86 (arr); + ((unsigned char *) base)[2] = getCx86 (arr+1); + ((unsigned char *) base)[1] = getCx86 (arr+2); + rcr = getCx86(CX86_RCR_BASE + reg); + setCx86 (CX86_CCR3, ccr3); /* disable MAPEN */ + + /* Enable interrupts if it was enabled previously */ + __restore_flags (flags); + + shift = ((unsigned char *) base)[1] & 0x0f; + *base &= 0xfffff000UL; -static void set_mtrr_up (unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type, int do_safe) + /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 + * Note: shift==0xf means 4G, this is unsupported. + */ + if (shift) + *size = (reg < 7 ? 0x800UL : 0x20000UL) << shift; + else + *size = 0; + + /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */ + if (reg < 7) { + switch (rcr) { + case 1: *type = MTRR_TYPE_UNCACHABLE; break; + case 8: *type = MTRR_TYPE_WRBACK; break; + case 9: *type = MTRR_TYPE_WRCOMB; break; + case 24: + default: *type = MTRR_TYPE_WRTHROUGH; break; + } + } else { + switch (rcr) { + case 0: *type = MTRR_TYPE_UNCACHABLE; break; + case 8: *type = MTRR_TYPE_WRCOMB; break; + case 9: *type = MTRR_TYPE_WRBACK; break; + case 25: + default: *type = MTRR_TYPE_WRTHROUGH; break; + } + } +} /* End Function cyrix_get_arr */ + +static void amd_get_mtrr (unsigned int reg, unsigned long *base, + unsigned long *size, mtrr_type *type) +{ + unsigned long low, high; + + rdmsr (0xC0000085, low, high); + /* Upper dword is region 1, lower is region 0 */ + if (reg == 1) low = high; + /* The base masks off on the right alignment */ + *base = low & 0xFFFE0000; + *type = 0; + if (low & 1) *type = MTRR_TYPE_UNCACHABLE; + if (low & 2) *type = MTRR_TYPE_WRCOMB; + if ( !(low & 3) ) + { + *size = 0; + return; + } + /* + * This needs a little explaining. The size is stored as an + * inverted mask of bits of 128K granularity 15 bits long offset + * 2 bits + * + * So to get a size we do invert the mask and add 1 to the lowest + * mask bit (4 as its 2 bits in). This gives us a size we then shift + * to turn into 128K blocks + * + * eg 111 1111 1111 1100 is 512K + * + * invert 000 0000 0000 0011 + * +1 000 0000 0000 0100 + * *128K ... + */ + low = (~low) & 0x1FFFC; + *size = (low + 4) << 15; + return; +} /* End Function amd_get_mtrr */ + +static void (*get_mtrr) (unsigned int reg, unsigned long *base, + unsigned long *size, mtrr_type *type) = NULL; + +static void intel_set_mtrr_up (unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type, int do_safe) /* [SUMMARY] Set variable MTRR register on the local CPU. The register to set. The base address of the region. @@ -376,6 +584,7 @@ The type of the region. If TRUE, do the change safely. If FALSE, safety measures should be done externally. + [RETURNS] Nothing. 
*/ { struct set_mtrr_context ctxt; @@ -393,8 +602,92 @@ wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0); } if (do_safe) set_mtrr_done (&ctxt); -} /* End Function set_mtrr_up */ +} /* End Function intel_set_mtrr_up */ + +static void cyrix_set_arr_up (unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type, int do_safe) +{ + struct set_mtrr_context ctxt; + unsigned char arr, arr_type, arr_size; + + arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ + + /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */ + size >>= (reg < 7 ? 12 : 18); + size &= 0x7fff; /* make sure arr_size <= 14 */ + for(arr_size = 0; size; arr_size++, size >>= 1); + + if (reg<7) { + switch (type) { + case MTRR_TYPE_UNCACHABLE: arr_type = 1; break; + case MTRR_TYPE_WRCOMB: arr_type = 9; break; + case MTRR_TYPE_WRTHROUGH: arr_type = 24; break; + default: arr_type = 8; break; + } + } else { + switch (type) { + case MTRR_TYPE_UNCACHABLE: arr_type = 0; break; + case MTRR_TYPE_WRCOMB: arr_type = 8; break; + case MTRR_TYPE_WRTHROUGH: arr_type = 25; break; + default: arr_type = 9; break; + } + } + + if (do_safe) set_mtrr_prepare (&ctxt); + setCx86(arr, ((unsigned char *) &base)[3]); + setCx86(arr+1, ((unsigned char *) &base)[2]); + setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size); + setCx86(CX86_RCR_BASE + reg, arr_type); + if (do_safe) set_mtrr_done (&ctxt); +} /* End Function cyrix_set_arr_up */ + +static void amd_set_mtrr_up (unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type, int do_safe) +/* [SUMMARY] Set variable MTRR register on the local CPU. + The register to set. + The base address of the region. + The size of the region. If this is 0 the region is disabled. + The type of the region. + If TRUE, do the change safely. If FALSE, safety measures should + be done externally. + [RETURNS] Nothing. +*/ +{ + u32 low, high; + struct set_mtrr_context ctxt; + if (do_safe) set_mtrr_prepare (&ctxt); + /* + * Low is MTRR0 , High MTRR 1 + */ + rdmsr (0xC0000085, low, high); + /* + * Blank to disable + */ + if (size == 0) + *(reg ? &high : &low) = 0; + else + /* Set the register to the base (already shifted for us), the + type (off by one) and an inverted bitmask of the size + + The size is the only odd bit. We are fed say 512K + We invert this and we get 111 1111 1111 1011 but + if you subtract one and invert you get the desired + 111 1111 1111 1100 mask + */ + *(reg ? &high : &low)=(((~(size-1))>>15)&0x0001FFFC)|base|(type+1); + /* + * The writeback rule is quite specific. See the manual. Its + * disable local interrupts, write back the cache, set the mtrr + */ + __asm__ __volatile__ ("wbinvd" : : : "memory"); + wrmsr (0xC0000085, low, high); + if (do_safe) set_mtrr_done (&ctxt); +} /* End Function amd_set_mtrr_up */ + +static void (*set_mtrr_up) (unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type, + int do_safe) = NULL; #ifdef __SMP__ @@ -407,7 +700,7 @@ }; -/* Get the MSR pair relating to a var range. */ +/* Get the MSR pair relating to a var range */ __initfunc(static void get_mtrr_var_range (unsigned int index, struct mtrr_var_range *vr)) { @@ -416,8 +709,8 @@ } /* End Function get_mtrr_var_range */ -/* Set the MSR pair relating to a var range. Returns TRUE if - changes are made. */ +/* Set the MSR pair relating to a var range. 
Returns TRUE if + changes are made */ __initfunc(static int set_mtrr_var_range_testing (unsigned int index, struct mtrr_var_range *vr)) { @@ -441,8 +734,7 @@ } return changed; -} - +} /* End Function set_mtrr_var_range_testing */ __initfunc(static void get_fixed_ranges(mtrr_type *frs)) { @@ -456,8 +748,7 @@ for (i = 0; i < 8; i++) rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]); -} - +} /* End Function get_fixed_ranges */ __initfunc(static int set_fixed_ranges_testing(mtrr_type *frs)) { @@ -487,10 +778,8 @@ changed = TRUE; } } - return changed; -} - +} /* End Function set_fixed_ranges_testing */ struct mtrr_state { @@ -502,7 +791,7 @@ }; -/* Grab all of the MTRR state for this CPU into *state. */ +/* Grab all of the MTRR state for this CPU into *state */ __initfunc(static void get_mtrr_state(struct mtrr_state *state)) { unsigned int nvrs, i; @@ -511,22 +800,22 @@ nvrs = state->num_var_ranges = get_num_var_ranges(); vrs = state->var_ranges - = kmalloc(nvrs * sizeof(struct mtrr_var_range), GFP_KERNEL); + = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL); if (vrs == NULL) nvrs = state->num_var_ranges = 0; for (i = 0; i < nvrs; i++) - get_mtrr_var_range(i, &vrs[i]); + get_mtrr_var_range (i, &vrs[i]); - get_fixed_ranges(state->fixed_ranges); + get_fixed_ranges (state->fixed_ranges); - rdmsr(MTRRdefType_MSR, lo, dummy); + rdmsr (MTRRdefType_MSR, lo, dummy); state->def_type = (lo & 0xff); state->enabled = (lo & 0xc00) >> 10; } /* End Function get_mtrr_state */ -/* Free resources associated with a struct mtrr_state */ +/* Free resources associated with a struct mtrr_state */ __initfunc(static void finalize_mtrr_state(struct mtrr_state *state)) { if (state->var_ranges) kfree (state->var_ranges); @@ -546,14 +835,14 @@ unsigned long change_mask = 0; for (i = 0; i < state->num_var_ranges; i++) - if (set_mtrr_var_range_testing(i, &state->var_ranges[i])) + if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) ) change_mask |= MTRR_CHANGE_MASK_VARIABLE; - if (set_fixed_ranges_testing(state->fixed_ranges)) + if ( set_fixed_ranges_testing(state->fixed_ranges) ) change_mask |= MTRR_CHANGE_MASK_FIXED; - /* set_mtrr_restore restores the old value of MTRRdefType, - so to set it we fiddle with the saved value. */ + /* Set_mtrr_restore restores the old value of MTRRdefType, + so to set it we fiddle with the saved value */ if ((ctxt->deftype_lo & 0xff) != state->def_type || ((ctxt->deftype_lo & 0xc00) >> 10) != state->enabled) { @@ -566,76 +855,63 @@ static atomic_t undone_count; -static void (*handler_func) (struct set_mtrr_context *ctxt, void *info); -static void *handler_info; static volatile int wait_barrier_execute = FALSE; static volatile int wait_barrier_cache_enable = FALSE; -static void sync_handler (void) +struct set_mtrr_data +{ + unsigned long smp_base; + unsigned long smp_size; + unsigned int smp_reg; + mtrr_type smp_type; +}; + +static void ipi_handler (void *info) /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. [RETURNS] Nothing. 
*/ { + struct set_mtrr_data *data = info; struct set_mtrr_context ctxt; set_mtrr_prepare (&ctxt); - /* Notify master CPU that I'm at the barrier and then wait */ + /* Notify master that I've flushed and disabled my cache */ atomic_dec (&undone_count); while (wait_barrier_execute) barrier (); /* The master has cleared me to execute */ - (*handler_func) (&ctxt, handler_info); + (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size, + data->smp_type, FALSE); /* Notify master CPU that I've executed the function */ atomic_dec (&undone_count); /* Wait for master to clear me to enable cache and return */ while (wait_barrier_cache_enable) barrier (); set_mtrr_done (&ctxt); -} /* End Function sync_handler */ +} /* End Function ipi_handler */ -static void do_all_cpus (void (*handler) (struct set_mtrr_context *ctxt, - void *info), - void *info, int local) -/* [SUMMARY] Execute a function on all CPUs, with caches flushed and disabled. - [PURPOSE] This function will synchronise all CPUs, flush and disable caches - on all CPUs, then call a specified function. When the specified function - finishes on all CPUs, caches are enabled on all CPUs. - The function to execute. - An arbitrary information pointer which is passed to <>. - If TRUE <> is executed locally. - [RETURNS] Nothing. -*/ +static void set_mtrr_smp (unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type) { - unsigned long timeout; + struct set_mtrr_data data; struct set_mtrr_context ctxt; - mtrr_hook = sync_handler; - handler_func = handler; - handler_info = info; + data.smp_reg = reg; + data.smp_base = base; + data.smp_size = size; + data.smp_type = type; wait_barrier_execute = TRUE; wait_barrier_cache_enable = TRUE; - /* Send a message to all other CPUs and wait for them to enter the - barrier */ atomic_set (&undone_count, smp_num_cpus - 1); - smp_send_mtrr(); - /* Wait for it to be done */ - timeout = jiffies + JIFFIE_TIMEOUT; - while ( (atomic_read (&undone_count) > 0) && - time_before(jiffies, timeout) ) - barrier (); - if (atomic_read (&undone_count) > 0) - { + /* Flush and disable the local CPU's cache and start the ball rolling on + other CPUs */ + set_mtrr_prepare (&ctxt); + if (smp_call_function (ipi_handler, &data, 1, 0) != 0) panic ("mtrr: timed out waiting for other CPUs\n"); - } - mtrr_hook = NULL; - /* All other CPUs should be waiting for the barrier, with their caches - already flushed and disabled. Prepare for function completion - notification */ + /* Wait for all other CPUs to flush and disable their caches */ + while (atomic_read (&undone_count) > 0) barrier (); + /* Set up for completion wait and then release other CPUs to change MTRRs*/ atomic_set (&undone_count, smp_num_cpus - 1); - /* Flush and disable the local CPU's cache and release the barier, which - should cause the other CPUs to execute the function. Also execute it - locally if required */ - set_mtrr_prepare (&ctxt); wait_barrier_execute = FALSE; - if (local) (*handler) (&ctxt, info); + (*set_mtrr_up) (reg, base, size, type, FALSE); /* Now wait for other CPUs to complete the function */ while (atomic_read (&undone_count) > 0) barrier (); /* Now all CPUs should have finished the function. 
Release the barrier to @@ -643,41 +919,10 @@ then enable the local cache and return */ wait_barrier_cache_enable = FALSE; set_mtrr_done (&ctxt); - handler_func = NULL; - handler_info = NULL; -} /* End Function do_all_cpus */ - - -struct set_mtrr_data -{ - unsigned long smp_base; - unsigned long smp_size; - unsigned int smp_reg; - mtrr_type smp_type; -}; - -static void set_mtrr_handler (struct set_mtrr_context *ctxt, void *info) -{ - struct set_mtrr_data *data = info; - - set_mtrr_up (data->smp_reg, data->smp_base, data->smp_size, data->smp_type, - FALSE); -} /* End Function set_mtrr_handler */ - -static void set_mtrr_smp (unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -{ - struct set_mtrr_data data; - - data.smp_reg = reg; - data.smp_base = base; - data.smp_size = size; - data.smp_type = type; - do_all_cpus (set_mtrr_handler, &data, TRUE); } /* End Function set_mtrr_smp */ -/* Some BIOS's are fucked and don't set all MTRRs the same! */ +/* Some BIOS's are fucked and don't set all MTRRs the same! */ __initfunc(static void mtrr_state_warn (unsigned long mask)) { if (!mask) return; @@ -720,6 +965,58 @@ #endif } /* End Function init_table */ +static int generic_get_free_region (unsigned long base, unsigned long size) +/* [SUMMARY] Get a free MTRR. + The starting (base) address of the region. + The size (in bytes) of the region. + [RETURNS] The index of the region on success, else -1 on error. +*/ +{ + int i, max; + mtrr_type ltype; + unsigned long lbase, lsize; + + max = get_num_var_ranges (); + for (i = 0; i < max; ++i) + { + (*get_mtrr) (i, &lbase, &lsize, <ype); + if (lsize < 1) return i; + } + return -ENOSPC; +} /* End Function generic_get_free_region */ + +static int cyrix_get_free_region (unsigned long base, unsigned long size) +/* [SUMMARY] Get a free ARR. + The starting (base) address of the region. + The size (in bytes) of the region. + [RETURNS] The index of the region on success, else -1 on error. +*/ +{ + int i; + mtrr_type ltype; + unsigned long lbase, lsize; + + /* If we are to set up a region >32M then look at ARR7 immediately */ + if (size > 0x2000000UL) { + cyrix_get_arr (7, &lbase, &lsize, <ype); + if (lsize < 1) return 7; + /* else try ARR0-ARR6 first */ + } else { + for (i = 0; i < 7; i++) + { + cyrix_get_arr (i, &lbase, &lsize, <ype); + if (lsize < 1) return i; + } + /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ + cyrix_get_arr (i, &lbase, &lsize, <ype); + if ((lsize < 1) && (size >= 0x40000)) return i; + } + return -ENOSPC; +} /* End Function cyrix_get_free_region */ + +static int (*get_free_region) (unsigned long base, + unsigned long size) = generic_get_free_region; + int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment) /* [SUMMARY] Add an MTRR entry. 
@@ -738,28 +1035,57 @@ unsigned long lbase, lsize, last; if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV; - if ( (base & 0xfff) || (size & 0xfff) ) + switch (boot_cpu_data.x86_vendor) { - printk ("mtrr: size and base must be multiples of 4kB\n"); - printk ("mtrr: size: %lx base: %lx\n", size, base); - return -EINVAL; - } - if (base + size < 0x100000) - { - printk ("mtrr: cannot set region below 1 MByte (0x%lx,0x%lx)\n", - base, size); - return -EINVAL; - } - /* Check upper bits of base and last are equal and lower bits are 0 for - base and 1 for last */ - last = base + size - 1; - for (lbase = base; !(lbase & 1) && (last & 1); - lbase = lbase >> 1, last = last >> 1); - if (lbase != last) - { - printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n", - base, size); + case X86_VENDOR_INTEL: + /* For Intel PPro stepping <= 7, must be 4 MiB aligned */ + if ( (boot_cpu_data.x86 == 6) && (boot_cpu_data.x86_model == 1) && + (boot_cpu_data.x86_mask <= 7) && ( base & ( (1 << 22) - 1 ) ) ) + { + printk ("mtrr: base(0x%lx) is not 4 MiB aligned\n", base); + return -EINVAL; + } + /* Fall through */ + case X86_VENDOR_CYRIX: + if ( (base & 0xfff) || (size & 0xfff) ) + { + printk ("mtrr: size and base must be multiples of 4 kiB\n"); + printk ("mtrr: size: %lx base: %lx\n", size, base); + return -EINVAL; + } + if (base + size < 0x100000) + { + printk ("mtrr: cannot set region below 1 MiB (0x%lx,0x%lx)\n", + base, size); + return -EINVAL; + } + /* Check upper bits of base and last are equal and lower bits are 0 + for base and 1 for last */ + last = base + size - 1; + for (lbase = base; !(lbase & 1) && (last & 1); + lbase = lbase >> 1, last = last >> 1); + if (lbase != last) + { + printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n", + base, size); + return -EINVAL; + } + break; + case X86_VENDOR_AMD: + /* Apply the K6 block alignment and size rules + In order + o Uncached or gathering only + o 128K or bigger block + o Power of 2 block + o base suitably aligned to the power + */ + if (type > MTRR_TYPE_WRCOMB || size < (1 << 17) || + (size & ~(size-1))-size || (base & (size-1))) + return -EINVAL; + break; + default: return -EINVAL; + /*break;*/ } if (type >= MTRR_NUM_TYPES) { @@ -775,10 +1101,10 @@ increment = increment ? 
1 : 0; max = get_num_var_ranges (); /* Search for existing MTRR */ - spin_lock (&main_lock); + spin_lock_reschedule (&main_lock); for (i = 0; i < max; ++i) { - get_mtrr (i, &lbase, &lsize, <ype); + (*get_mtrr) (i, &lbase, &lsize, <ype); if (base >= lbase + lsize) continue; if ( (base < lbase) && (base + size <= lbase) ) continue; /* At this point we know there is some kind of overlap/enclosure */ @@ -804,19 +1130,18 @@ return i; } /* Search for an empty MTRR */ - for (i = 0; i < max; ++i) + i = (*get_free_region) (base, size); + if (i < 0) { - get_mtrr (i, &lbase, &lsize, <ype); - if (lsize > 0) continue; - set_mtrr (i, base, size, type); - usage_table[i] = 1; - compute_ascii (); spin_unlock (&main_lock); + printk ("mtrr: no more MTRRs available\n"); return i; } + set_mtrr (i, base, size, type); + usage_table[i] = 1; + compute_ascii (); spin_unlock (&main_lock); - printk ("mtrr: no more MTRRs available\n"); - return -ENOSPC; + return i; } /* End Function mtrr_add */ int mtrr_del (int reg, unsigned long base, unsigned long size) @@ -836,13 +1161,13 @@ if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV; max = get_num_var_ranges (); - spin_lock (&main_lock); + spin_lock_reschedule (&main_lock); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { - get_mtrr (i, &lbase, &lsize, <ype); + (*get_mtrr) (i, &lbase, &lsize, <ype); if ( (lbase == base) && (lsize == size) ) { reg = i; @@ -862,7 +1187,7 @@ printk ("mtrr: register: %d too big\n", reg); return -EINVAL; } - get_mtrr (reg, &lbase, &lsize, <ype); + (*get_mtrr) (reg, &lbase, &lsize, <ype); if (lsize < 1) { spin_unlock (&main_lock); @@ -913,7 +1238,9 @@ reg = mtrr_del (-1, base, size); if (reg < 0) return reg; - if (fcount != NULL) --fcount[reg]; + if (fcount == NULL) return reg; + if (fcount[reg] < 1) return -EINVAL; + --fcount[reg]; return reg; } /* End Function mtrr_file_del */ @@ -1019,11 +1346,18 @@ err = mtrr_file_del (sentry.base, sentry.size, file); if (err < 0) return err; break; + case MTRRIOC_KILL_ENTRY: + if ( !suser () ) return -EPERM; + if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) ) + return -EFAULT; + err = mtrr_del (-1, sentry.base, sentry.size); + if (err < 0) return err; + break; case MTRRIOC_GET_ENTRY: if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) ) return -EFAULT; if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL; - get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type); + (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type); gentry.type = type; if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) ) return -EFAULT; @@ -1115,7 +1449,7 @@ max = get_num_var_ranges (); for (i = 0; i < max; i++) { - get_mtrr (i, &base, &size, &type); + (*get_mtrr) (i, &base, &size, &type); if (size < 1) usage_table[i] = 0; else { @@ -1148,23 +1482,165 @@ #ifdef __SMP__ +typedef struct { + unsigned long base; + unsigned long size; + mtrr_type type; +} arr_state_t; + +arr_state_t arr_state[8] __initdata = { + {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, + {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL} +}; + +unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 }; + +__initfunc(static void cyrix_arr_init_secondary(void)) +{ + struct set_mtrr_context ctxt; + int i; + + set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */ + + /* the CCRs are not contiguous */ + for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]); + for( ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]); + for(i=0; i<8; i++) + 
cyrix_set_arr_up(i, + arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE); + + set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */ +} /* End Function cyrix_arr_init_secondary */ + +#endif + +/* + * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection + * with the SMM (System Management Mode) mode. So we need the following: + * Check whether SMI_LOCK (CCR3 bit 0) is set + * if it is set, write a warning message: ARR3 cannot be changed! + * (it cannot be changed until the next processor reset) + * if it is reset, then we can change it, set all the needed bits: + * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset) + * - disable access to SMM memory (CCR1 bit 2 reset) + * - disable SMM mode (CCR1 bit 1 reset) + * - disable write protection of ARR3 (CCR6 bit 1 reset) + * - (maybe) disable ARR3 + * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set) + */ +__initfunc(static void cyrix_arr_init(void)) +{ + struct set_mtrr_context ctxt; + unsigned char ccr[7]; + int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 }; +#ifdef __SMP__ + int i; +#endif + + set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */ + + /* Save all CCRs locally */ + ccr[0] = getCx86 (CX86_CCR0); + ccr[1] = getCx86 (CX86_CCR1); + ccr[2] = getCx86 (CX86_CCR2); + ccr[3] = ctxt.ccr3; + ccr[4] = getCx86 (CX86_CCR4); + ccr[5] = getCx86 (CX86_CCR5); + ccr[6] = getCx86 (CX86_CCR6); + + if (ccr[3] & 1) + ccrc[3] = 1; + else { + /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and + * access to SMM memory through ARR3 (bit 7). + */ +/* + if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; } + if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; } + if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; } +*/ + if (ccr[6] & 0x02) { + ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3. */ + setCx86 (CX86_CCR6, ccr[6]); + } + /* Disable ARR3. */ + /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */ + } + /* If we changed CCR1 in memory, change it in the processor, too. 
*/ + if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]); + + /* Enable ARR usage by the processor */ + if (!(ccr[5] & 0x20)) { + ccr[5] |= 0x20; ccrc[5] = 1; + setCx86 (CX86_CCR5, ccr[5]); + } + +#ifdef __SMP__ + for(i=0; i<7; i++) ccr_state[i] = ccr[i]; + for(i=0; i<8; i++) + cyrix_get_arr(i, + &arr_state[i].base, &arr_state[i].size, &arr_state[i].type); +#endif + + set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */ + + if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n"); + if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n"); +/* + if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n"); + if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n"); + if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n"); +*/ + if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n"); +} /* End Function cyrix_arr_init */ + +__initfunc(static void mtrr_setup (void)) +{ + printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION); + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + get_mtrr = intel_get_mtrr; + set_mtrr_up = intel_set_mtrr_up; + break; + case X86_VENDOR_CYRIX: + printk ("mtrr: Using Cyrix style ARRs\n"); + get_mtrr = cyrix_get_arr; + set_mtrr_up = cyrix_set_arr_up; + get_free_region = cyrix_get_free_region; + break; + case X86_VENDOR_AMD: + get_mtrr = amd_get_mtrr; + set_mtrr_up = amd_set_mtrr_up; + break; + } +} /* End Function mtrr_setup */ + +#ifdef __SMP__ + static volatile unsigned long smp_changes_mask __initdata = 0; static struct mtrr_state smp_mtrr_state __initdata = {0, 0}; __initfunc(void mtrr_init_boot_cpu (void)) { if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return; - printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION); - - get_mtrr_state (&smp_mtrr_state); + mtrr_setup (); + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + get_mtrr_state (&smp_mtrr_state); + break; + case X86_VENDOR_CYRIX: + cyrix_arr_init (); + break; + } } /* End Function mtrr_init_boot_cpu */ -__initfunc(void mtrr_init_secondary_cpu (void)) +__initfunc(static void intel_mtrr_init_secondary_cpu (void)) { unsigned long mask, count; struct set_mtrr_context ctxt; - if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return; /* Note that this is not ideal, since the cache is only flushed/disabled for this CPU while the MTRRs are changed, but changing this requires more invasive changes to the way the kernel boots */ @@ -1177,21 +1653,52 @@ if (mask & 0x01) set_bit (count, &smp_changes_mask); mask >>= 1; } -} /* End Function mtrr_init_secondary_cpu */ +} /* End Function intel_mtrr_init_secondary_cpu */ +__initfunc(void mtrr_init_secondary_cpu (void)) +{ + if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return; + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + intel_mtrr_init_secondary_cpu (); + break; + case X86_VENDOR_CYRIX: + /* This is _completely theoretical_! + * I assume here that one day Cyrix will support Intel APIC. + * In reality on non-Intel CPUs we won't even get to this routine. + * Hopefully no one will plug two Cyrix processors in a dual P5 board. 
+ * :-) + */ + cyrix_arr_init_secondary (); + break; + default: + printk ("mtrr: SMP support incomplete for this vendor\n"); + break; + } +} /* End Function mtrr_init_secondary_cpu */ #endif /* __SMP__ */ __initfunc(int mtrr_init(void)) { if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0; -# ifndef __SMP__ - printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION); -# endif - # ifdef __SMP__ - finalize_mtrr_state (&smp_mtrr_state); - mtrr_state_warn (smp_changes_mask); -# endif /* __SMP__ */ + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_INTEL: + finalize_mtrr_state (&smp_mtrr_state); + mtrr_state_warn (smp_changes_mask); + break; + } +# else /* __SMP__ */ + mtrr_setup (); + switch (boot_cpu_data.x86_vendor) + { + case X86_VENDOR_CYRIX: + cyrix_arr_init (); + break; + } +# endif /* !__SMP__ */ # ifdef CONFIG_PROC_FS proc_register (&proc_root, &proc_root_mtrr); diff -urN linux-2.2.4/arch/i386/kernel/setup.c linux/arch/i386/kernel/setup.c --- linux-2.2.4/arch/i386/kernel/setup.c Tue Mar 23 06:18:17 1999 +++ linux/arch/i386/kernel/setup.c Wed Mar 24 10:24:15 1999 @@ -5,6 +5,10 @@ * * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean * and Martin Mares, November 1997. + * + * Force Cyrix 6x86(MX) and M II processors to report MTRR capability + * and fix against Cyrix "coma bug" by + * Zoltan Boszormenyi February 1999. */ /* @@ -408,6 +412,14 @@ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); c->x86_model_id[48] = 0; + /* Set MTRR capability flag if appropriate */ + if(boot_cpu_data.x86 !=5) + return 1; + if((boot_cpu_data.x86_model == 9) || + ((boot_cpu_data.x86_model == 8) && + (boot_cpu_data.x86_mask >= 8))) + c->x86_capability |= X86_FEATURE_MTRR; + return 1; } @@ -587,6 +599,10 @@ (c->x86_model)++; } else /* 686 */ p = Cx86_cb+1; + /* Emulate MTRRs using Cyrix's ARRs. */ + c->x86_capability |= X86_FEATURE_MTRR; + /* 6x86's contain this bug */ + c->coma_bug = 1; break; case 4: /* MediaGX/GXm */ @@ -611,11 +627,14 @@ case 5: /* 6x86MX/M II */ if (dir1 > 7) dir0_msn++; /* M II */ + else c->coma_bug = 1; /* 6x86MX, it has the bug. */ tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; p = Cx86_cb+tmp; if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) (c->x86_model)++; + /* Emulate MTRRs using Cyrix's ARRs. */ + c->x86_capability |= X86_FEATURE_MTRR; break; case 0xf: /* Cyrix 486 without DEVID registers */ @@ -869,7 +888,7 @@ int sep_bug; static char *x86_cap_flags[] = { "fpu", "vme", "de", "pse", "tsc", "msr", "6", "mce", - "cx8", "9", "10", "sep", "12", "pge", "14", "cmov", + "cx8", "9", "10", "sep", "mtrr", "pge", "14", "cmov", "16", "17", "psn", "19", "20", "21", "22", "mmx", "24", "kni", "26", "27", "28", "29", "30", "31" }; @@ -917,7 +936,6 @@ } else if (c->x86_vendor == X86_VENDOR_INTEL) { x86_cap_flags[6] = "pae"; x86_cap_flags[9] = "apic"; - x86_cap_flags[12] = "mtrr"; x86_cap_flags[14] = "mca"; x86_cap_flags[16] = "pat"; x86_cap_flags[17] = "pse36"; @@ -936,6 +954,7 @@ "hlt_bug\t\t: %s\n" "sep_bug\t\t: %s\n" "f00f_bug\t: %s\n" + "coma_bug\t: %s\n" "fpu\t\t: %s\n" "fpu_exception\t: %s\n" "cpuid level\t: %d\n" @@ -945,6 +964,7 @@ c->hlt_works_ok ? "no" : "yes", sep_bug ? "yes" : "no", c->f00f_bug ? "yes" : "no", + c->coma_bug ? "yes" : "no", c->hard_math ? "yes" : "no", (c->hard_math && ignore_irq13) ? 
"yes" : "no", c->cpuid_level, diff -urN linux-2.2.4/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c --- linux-2.2.4/arch/i386/kernel/smp.c Tue Mar 16 06:09:47 1999 +++ linux/arch/i386/kernel/smp.c Wed Mar 24 10:22:54 1999 @@ -42,6 +42,8 @@ #include "irq.h" +#define JIFFIE_TIMEOUT 100 + extern unsigned long start_kernel; extern void update_one_process( struct task_struct *p, unsigned long ticks, unsigned long user, @@ -1654,15 +1656,84 @@ send_IPI_allbutself(STOP_CPU_VECTOR); } -/* - * this function sends an 'reload MTRR state' IPI to all other CPUs - * in the system. it goes straight through, completion processing - * is done on the mttr.c level. +/* Structure and data for smp_call_function(). This is designed to minimise + * static memory requirements. It also looks cleaner. */ - -void smp_send_mtrr(void) +struct smp_call_function_struct { + void (*func) (void *info); + void *info; + atomic_t unstarted_count; + atomic_t unfinished_count; + int wait; +}; +static volatile struct smp_call_function_struct *smp_call_function_data = NULL; + +/* + * this function sends a 'generic call function' IPI to all other CPUs + * in the system. + */ + +int smp_call_function (void (*func) (void *info), void *info, int retry, + int wait) +/* [SUMMARY] Run a function on all other CPUs. + The function to run. This must be fast and non-blocking. + An arbitrary pointer to pass to the function. + If true, keep retrying until ready. + If true, wait until function has completed on other CPUs. + [RETURNS] 0 on success, else a negative status code. Does not return until + remote CPUs are nearly ready to execute <> or are or have executed. +*/ { - send_IPI_allbutself(MTRR_CHANGE_VECTOR); + unsigned long timeout; + struct smp_call_function_struct data; + static spinlock_t lock = SPIN_LOCK_UNLOCKED; + + if (retry) { + while (1) { + if (smp_call_function_data) { + schedule (); /* Give a mate a go */ + continue; + } + spin_lock (&lock); + if (smp_call_function_data) { + spin_unlock (&lock); /* Bad luck */ + continue; + } + /* Mine, all mine! 
*/ + break; + } + } + else { + if (smp_call_function_data) return -EBUSY; + spin_lock (&lock); + if (smp_call_function_data) { + spin_unlock (&lock); + return -EBUSY; + } + } + smp_call_function_data = &data; + spin_unlock (&lock); + data.func = func; + data.info = info; + atomic_set (&data.unstarted_count, smp_num_cpus - 1); + data.wait = wait; + if (wait) atomic_set (&data.unfinished_count, smp_num_cpus - 1); + /* Send a message to all other CPUs and wait for them to respond */ + send_IPI_allbutself (CALL_FUNCTION_VECTOR); + /* Wait for response */ + timeout = jiffies + JIFFIE_TIMEOUT; + while ( (atomic_read (&data.unstarted_count) > 0) && + time_before (jiffies, timeout) ) + barrier (); + if (atomic_read (&data.unstarted_count) > 0) { + smp_call_function_data = NULL; + return -ETIMEDOUT; + } + if (wait) + while (atomic_read (&data.unfinished_count) > 0) + barrier (); + smp_call_function_data = NULL; + return 0; } /* @@ -1802,12 +1873,19 @@ stop_this_cpu(); } -void (*mtrr_hook) (void) = NULL; - -asmlinkage void smp_mtrr_interrupt(void) +asmlinkage void smp_call_function_interrupt(void) { - ack_APIC_irq(); - if (mtrr_hook) (*mtrr_hook)(); + void (*func) (void *info) = smp_call_function_data->func; + void *info = smp_call_function_data->info; + int wait = smp_call_function_data->wait; + + ack_APIC_irq (); + /* Notify initiating CPU that I've grabbed the data and am about to + execute the function */ + atomic_dec (&smp_call_function_data->unstarted_count); + /* At this point the structure may be out of scope unless wait==1 */ + (*func) (info); + if (wait) atomic_dec (&smp_call_function_data->unfinished_count); } /* diff -urN linux-2.2.4/drivers/video/vesafb.c linux/drivers/video/vesafb.c --- linux-2.2.4/drivers/video/vesafb.c Fri Feb 26 05:02:13 1999 +++ linux/drivers/video/vesafb.c Wed Mar 24 10:22:54 1999 @@ -632,9 +632,7 @@ video_cmap_len = 256; } request_region(0x3c0, 32, "vga+"); -#ifdef CONFIG_MTRR mtrr_add((unsigned long)video_base, video_size, MTRR_TYPE_WRCOMB, 1); -#endif strcpy(fb_info.modename, "VESA VGA"); fb_info.changevar = NULL; diff -urN linux-2.2.4/include/asm-alpha/init_steps.h linux/include/asm-alpha/init_steps.h --- linux-2.2.4/include/asm-alpha/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-alpha/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-alpha/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. + +*/ +#ifndef _alpha_INIT_STEPS_H +#define _alpha_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _alpha_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-arm/init_steps.h linux/include/asm-arm/init_steps.h --- linux-2.2.4/include/asm-arm/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-arm/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-arm/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. 
+ +*/ +#ifndef _arm_INIT_STEPS_H +#define _arm_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _arm_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-generic/init_steps.h linux/include/asm-generic/init_steps.h --- linux-2.2.4/include/asm-generic/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-generic/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-generic/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. + +*/ +#ifndef _generic_INIT_STEPS_H +#define _generic_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _generic_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-i386/bugs.h linux/include/asm-i386/bugs.h --- linux-2.2.4/include/asm-i386/bugs.h Wed Mar 24 08:54:09 1999 +++ linux/include/asm-i386/bugs.h Wed Mar 24 10:22:54 1999 @@ -246,6 +246,7 @@ ((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) { int eax, dummy; unsigned char ccr3, ccr4; + __u32 old_cap; cli(); ccr3 = getCx86(CX86_CCR3); @@ -257,8 +258,11 @@ /* we have up to level 1 available on the Cx6x86(L|MX) */ boot_cpu_data.cpuid_level = 1; + /* Need to preserve some externally computed capabilities */ + old_cap = boot_cpu_data.x86_capability & X86_FEATURE_MTRR; cpuid(1, &eax, &dummy, &dummy, &boot_cpu_data.x86_capability); + boot_cpu_data.x86_capability |= old_cap; boot_cpu_data.x86 = (eax >> 8) & 15; /* @@ -314,6 +318,24 @@ } /* + * In setup.c's cyrix_model() we have set the boot_cpu_data.coma_bug + * on certain processors that we know contain this bug and now we + * enable the workaround for it. + */ + +__initfunc(static void check_cyrix_coma(void)) +{ + if (boot_cpu_data.coma_bug) { + unsigned char ccr1; + cli(); + ccr1 = getCx86 (CX86_CCR1); + setCx86 (CX86_CCR1, ccr1 | 0x10); + sti(); + printk("Cyrix processor with \"coma bug\" found, workaround enabled\n"); + } +} + +/* * Check wether we are able to run this kernel safely on SMP. * * - In order to run on a i386, we need to be compiled for i386 @@ -371,5 +393,6 @@ check_popad(); check_amd_k6(); check_pentium_f00f(); + check_cyrix_coma(); system_utsname.machine[1] = '0' + boot_cpu_data.x86; } diff -urN linux-2.2.4/include/asm-i386/init_steps.h linux/include/asm-i386/init_steps.h --- linux-2.2.4/include/asm-i386/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-i386/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,32 @@ +/* include/asm-i386/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. 
+ +*/ +#ifndef _i386_INIT_STEPS_H +#define _i386_INIT_STEPS_H + +#ifdef CONFIG_MTRR +# include +#endif + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +#ifdef CONFIG_MTRR + mtrr_init (); +#endif +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _i386_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-i386/mtrr.h linux/include/asm-i386/mtrr.h --- linux-2.2.4/include/asm-i386/mtrr.h Wed Mar 24 08:55:39 1999 +++ linux/include/asm-i386/mtrr.h Wed Mar 24 10:22:54 1999 @@ -1,6 +1,6 @@ /* Generic MTRR (Memory Type Range Register) ioctls. - Copyright (C) 1997-1998 Richard Gooch + Copyright (C) 1997-1999 Richard Gooch This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -44,10 +44,11 @@ }; /* These are the various ioctls */ -#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) -#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) -#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) +#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) +#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) +#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) +#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) /* These are the region types */ #define MTRR_TYPE_UNCACHABLE 0 @@ -75,7 +76,7 @@ #ifdef __KERNEL__ /* The following functions are for use by other drivers */ -# if defined(CONFIG_MTRR) || defined(CONFIG_MTRR_MODULE) +# ifdef CONFIG_MTRR extern int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_del (int reg, unsigned long base, unsigned long size); diff -urN linux-2.2.4/include/asm-i386/processor.h linux/include/asm-i386/processor.h --- linux-2.2.4/include/asm-i386/processor.h Wed Mar 24 08:54:14 1999 +++ linux/include/asm-i386/processor.h Wed Mar 24 10:22:54 1999 @@ -35,6 +35,7 @@ call */ int fdiv_bug; int f00f_bug; + int coma_bug; unsigned long loops_per_sec; unsigned long *pgd_quick; unsigned long *pte_quick; @@ -119,12 +120,17 @@ /* * Cyrix CPU configuration register indexes */ +#define CX86_CCR0 0xc0 +#define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 +#define CX86_CCR6 0xea #define CX86_DIR0 0xfe #define CX86_DIR1 0xff +#define CX86_ARR_BASE 0xc4 +#define CX86_RCR_BASE 0xdc /* * Cyrix CPU indexed register access macros diff -urN linux-2.2.4/include/asm-m68k/init_steps.h linux/include/asm-m68k/init_steps.h --- linux-2.2.4/include/asm-m68k/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-m68k/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-m68k/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. 
+ +*/ +#ifndef _m68k_INIT_STEPS_H +#define _m68k_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _m68k_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-mips/init_steps.h linux/include/asm-mips/init_steps.h --- linux-2.2.4/include/asm-mips/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-mips/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-mips/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. + +*/ +#ifndef _mips_INIT_STEPS_H +#define _mips_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _mips_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-ppc/init_steps.h linux/include/asm-ppc/init_steps.h --- linux-2.2.4/include/asm-ppc/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-ppc/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-ppc/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. + +*/ +#ifndef _ppc_INIT_STEPS_H +#define _ppc_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _ppc_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-sparc/init_steps.h linux/include/asm-sparc/init_steps.h --- linux-2.2.4/include/asm-sparc/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-sparc/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-sparc/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. + +*/ +#ifndef _sparc_INIT_STEPS_H +#define _sparc_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _sparc_INIT_STEPS_H */ diff -urN linux-2.2.4/include/asm-sparc64/init_steps.h linux/include/asm-sparc64/init_steps.h --- linux-2.2.4/include/asm-sparc64/init_steps.h Thu Jan 1 10:00:00 1970 +++ linux/include/asm-sparc64/init_steps.h Wed Mar 24 10:22:54 1999 @@ -0,0 +1,25 @@ +/* include/asm-sparc64/init_steps.h + + Copyright (C) 1998-1999 Richard Gooch + Licenced under the GPL. + + This file contains architecture-specific setup code required during the + initialisation process. Multiple sequence points are defined. 
+ +*/ +#ifndef _sparc64_INIT_STEPS_H +#define _sparc64_INIT_STEPS_H + +__initfunc(static void arch_post_cpu_n_task_init(void)) +{ +} /* End Function arch_post_cpu_n_task_init */ + +__initfunc(static void arch_pre_bus_init(void)) +{ +} /* End Function arch_pre_bus_init */ + +__initfunc(static void arch_post_bus_init(void)) +{ +} /* End Function arch_post_bus_init */ + +#endif /* _sparc64_INIT_STEPS_H */ diff -urN linux-2.2.4/include/linux/smp.h linux/include/linux/smp.h --- linux-2.2.4/include/linux/smp.h Wed Mar 24 08:54:15 1999 +++ linux/include/linux/smp.h Wed Mar 24 10:22:54 1999 @@ -42,6 +42,12 @@ extern void smp_commence(void); /* + * Call a function on all other processors + */ +extern int smp_call_function (void (*func) (void *info), void *info, + int retry, int wait); + +/* * True once the per process idle is forked */ extern int smp_threads_ready; @@ -60,7 +66,7 @@ * when rebooting */ #define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/ -#define MSG_MTRR_CHANGE 0x0004 /* Change MTRR */ +#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */ #else @@ -68,12 +74,13 @@ * These macros fold the SMP functionality into a single CPU system */ -#define smp_num_cpus 1 -#define smp_processor_id() 0 -#define hard_smp_processor_id() 0 -#define smp_threads_ready 1 +#define smp_num_cpus 1 +#define smp_processor_id() 0 +#define hard_smp_processor_id() 0 +#define smp_threads_ready 1 #define kernel_lock() -#define cpu_logical_map(cpu) 0 +#define cpu_logical_map(cpu) 0 +#define smp_call_function(func,info,retry,wait) #endif #endif diff -urN linux-2.2.4/init/main.c linux/init/main.c --- linux-2.2.4/init/main.c Thu Feb 25 11:27:54 1999 +++ linux/init/main.c Wed Mar 24 10:22:54 1999 @@ -25,6 +25,7 @@ #include #include +#include #ifdef CONFIG_PCI #include @@ -38,10 +39,6 @@ #include #endif -#ifdef CONFIG_MTRR -# include -#endif - #ifdef CONFIG_APM #include #endif @@ -1230,15 +1227,7 @@ */ child_reaper = current; -#if defined(CONFIG_MTRR) /* Do this after SMP initialization */ -/* - * We should probably create some architecture-dependent "fixup after - * everything is up" style function where this would belong better - * than in init/main.c.. - */ - mtrr_init(); -#endif - + arch_post_cpu_n_task_init(); /* Do this after SMP initialization */ #ifdef CONFIG_SYSCTL sysctl_init(); #endif @@ -1247,6 +1236,7 @@ * Ok, at this point all CPU's should be initialized, so * we can start looking into devices.. */ + arch_pre_bus_init(); #ifdef CONFIG_PCI pci_init(); #endif @@ -1268,6 +1258,7 @@ #ifdef CONFIG_DIO dio_init(); #endif + arch_post_bus_init(); /* Networking initialization needs a process context */ sock_init();
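The smp_call_function() interface introduced above replaces the old MTRR-only
IPI, and other subsystems can use it the same way mtrr.c now does. A minimal
sketch for an SMP kernel follows; report_cpu and notify_all_cpus are made-up
names, and only the smp_call_function() prototype and return convention are
taken from the patch itself.

  #include <linux/smp.h>
  #include <linux/kernel.h>

  static void report_cpu (void *info)
  {
      /* Runs on every other CPU from the CALL_FUNCTION_VECTOR IPI:
         keep it fast and non-blocking. */
      printk ("cpu %d has run the handler\n", smp_processor_id ());
  }

  static void notify_all_cpus (void)
  {
      /* retry=1: keep trying (calling schedule) until the single call
         slot is free; wait=1: return only after all other CPUs have
         finished. Returns 0 on success, else a negative status code
         such as -ETIMEDOUT. */
      if (smp_call_function (report_cpu, NULL, 1, 1) != 0)
          printk ("notify_all_cpus: other CPUs did not respond\n");
      report_cpu (NULL);   /* the IPI goes to all *other* CPUs only */
  }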