--- linux-2.6.0-test5/arch/alpha/boot/misc.c 2003-08-08 22:55:10.000000000 -0700 +++ 25/arch/alpha/boot/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -106,8 +106,8 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); - if (free_mem_ptr <= 0) error("Memory error\n"); + if (size <0) error("Malloc error"); + if (free_mem_ptr <= 0) error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -140,7 +140,7 @@ static void gzip_release(void **ptr) int fill_inbuf(void) { if (insize != 0) - error("ran out of input data\n"); + error("ran out of input data"); inbuf = input_data; insize = input_data_size; --- linux-2.6.0-test5/arch/alpha/kernel/core_irongate.c 2003-06-14 12:18:25.000000000 -0700 +++ 25/arch/alpha/kernel/core_irongate.c 2003-09-18 21:38:20.000000000 -0700 @@ -391,7 +391,7 @@ irongate_ioremap(unsigned long addr, uns cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1); pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1; - if (__alpha_remap_area_pages(VMALLOC_VMADDR(vaddr), + if (__alpha_remap_area_pages(vaddr, pte, PAGE_SIZE, 0)) { printk("AGP ioremap: FAILED to map...\n"); vfree(area->addr); --- linux-2.6.0-test5/arch/alpha/kernel/core_marvel.c 2003-08-22 19:23:39.000000000 -0700 +++ 25/arch/alpha/kernel/core_marvel.c 2003-09-18 21:38:20.000000000 -0700 @@ -696,7 +696,7 @@ marvel_ioremap(unsigned long addr, unsig } pfn >>= 1; /* make it a true pfn */ - if (__alpha_remap_area_pages(VMALLOC_VMADDR(vaddr), + if (__alpha_remap_area_pages(vaddr, pfn << PAGE_SHIFT, PAGE_SIZE, 0)) { printk("FAILED to map...\n"); --- linux-2.6.0-test5/arch/alpha/kernel/core_titan.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/alpha/kernel/core_titan.c 2003-09-18 21:38:20.000000000 -0700 @@ -534,7 +534,7 @@ titan_ioremap(unsigned long addr, unsign } pfn >>= 1; /* make it a true pfn */ - if (__alpha_remap_area_pages(VMALLOC_VMADDR(vaddr), + if (__alpha_remap_area_pages(vaddr, pfn << PAGE_SHIFT, PAGE_SIZE, 0)) { printk("FAILED to map...\n"); --- linux-2.6.0-test5/arch/arm26/boot/compressed/misc.c 2003-06-14 12:18:04.000000000 -0700 +++ 25/arch/arm26/boot/compressed/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -191,8 +191,8 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); - if (free_mem_ptr <= 0) error("Memory error\n"); + if (size <0) error("Malloc error"); + if (free_mem_ptr <= 0) error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -236,7 +236,7 @@ static void gzip_release(void **ptr) int fill_inbuf(void) { if (insize != 0) - error("ran out of input data\n"); + error("ran out of input data"); inbuf = input_data; insize = &input_data_end[0] - &input_data[0]; --- linux-2.6.0-test5/arch/arm/boot/compressed/head-sa1100.S 2003-06-14 12:17:58.000000000 -0700 +++ 25/arch/arm/boot/compressed/head-sa1100.S 2003-09-18 21:02:00.000000000 -0700 @@ -34,6 +34,10 @@ __SA1100_start: @ REVISIT_PFS168: Temporary until firmware updated to use assigned machine number mov r7, #MACH_TYPE_PFS168 #endif +#ifdef CONFIG_SA1100_SIMPAD + @ UNTIL we've something like an open bootldr + mov r7, #MACH_TYPE_SIMPAD @should be 87 +#endif #ifdef CONFIG_SA1100_VICTOR teq r7, #MACH_TYPE_VICTOR --- linux-2.6.0-test5/arch/arm/boot/compressed/misc.c 2003-06-14 12:18:25.000000000 -0700 +++ 25/arch/arm/boot/compressed/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -191,8 +191,8 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); - if (free_mem_ptr <= 0) error("Memory error\n"); + if (size <0) error("Malloc error"); + if 
(free_mem_ptr <= 0) error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -236,7 +236,7 @@ static void gzip_release(void **ptr) int fill_inbuf(void) { if (insize != 0) - error("ran out of input data\n"); + error("ran out of input data"); inbuf = input_data; insize = &input_data_end[0] - &input_data[0]; --- linux-2.6.0-test5/arch/arm/common/sa1111.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/common/sa1111.c 2003-09-18 21:02:00.000000000 -0700 @@ -54,87 +54,74 @@ struct sa1111 { */ static struct sa1111 *g_sa1111; -static struct sa1111_dev usb_dev = { - .skpcr_mask = SKPCR_UCLKEN, - .devid = SA1111_DEVID_USB, - .irq = { - IRQ_USBPWR, - IRQ_HCIM, - IRQ_HCIBUFFACC, - IRQ_HCIRMTWKP, - IRQ_NHCIMFCIR, - IRQ_USB_PORT_RESUME - }, +struct sa1111_dev_info { + unsigned long offset; + unsigned long skpcr_mask; + unsigned int devid; + unsigned int irq[6]; }; -static struct sa1111_dev sac_dev = { - .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, - .devid = SA1111_DEVID_SAC, - .irq = { - AUDXMTDMADONEA, - AUDXMTDMADONEB, - AUDRCVDMADONEA, - AUDRCVDMADONEB +static struct sa1111_dev_info sa1111_devices[] = { + { + .offset = SA1111_USB, + .skpcr_mask = SKPCR_UCLKEN, + .devid = SA1111_DEVID_USB, + .irq = { + IRQ_USBPWR, + IRQ_HCIM, + IRQ_HCIBUFFACC, + IRQ_HCIRMTWKP, + IRQ_NHCIMFCIR, + IRQ_USB_PORT_RESUME + }, }, -}; - -static struct sa1111_dev ssp_dev = { - .skpcr_mask = SKPCR_SCLKEN, - .devid = SA1111_DEVID_SSP, -}; - -static struct sa1111_dev kbd_dev = { - .skpcr_mask = SKPCR_PTCLKEN, - .devid = SA1111_DEVID_PS2, - .irq = { - IRQ_TPRXINT, - IRQ_TPTXINT + { + .offset = 0x0600, + .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, + .devid = SA1111_DEVID_SAC, + .irq = { + AUDXMTDMADONEA, + AUDXMTDMADONEB, + AUDRCVDMADONEA, + AUDRCVDMADONEB + }, }, -}; - -static struct sa1111_dev mse_dev = { - .skpcr_mask = SKPCR_PMCLKEN, - .devid = SA1111_DEVID_PS2, - .irq = { - IRQ_MSRXINT, - IRQ_MSTXINT + { + .offset = 0x0800, + .skpcr_mask = SKPCR_SCLKEN, + .devid = SA1111_DEVID_SSP, }, -}; - -static struct sa1111_dev int_dev = { - .skpcr_mask = 0, - .devid = SA1111_DEVID_INT, -}; - -static struct sa1111_dev pcmcia_dev = { - .skpcr_mask = 0, - .devid = SA1111_DEVID_PCMCIA, - .irq = { - IRQ_S0_READY_NINT, - IRQ_S0_CD_VALID, - IRQ_S0_BVD1_STSCHG, - IRQ_S1_READY_NINT, - IRQ_S1_CD_VALID, - IRQ_S1_BVD1_STSCHG, + { + .offset = SA1111_KBD, + .skpcr_mask = SKPCR_PTCLKEN, + .devid = SA1111_DEVID_PS2, + .irq = { + IRQ_TPRXINT, + IRQ_TPTXINT + }, + }, + { + .offset = SA1111_MSE, + .skpcr_mask = SKPCR_PMCLKEN, + .devid = SA1111_DEVID_PS2, + .irq = { + IRQ_MSRXINT, + IRQ_MSTXINT + }, + }, + { + .offset = 0x1800, + .skpcr_mask = 0, + .devid = SA1111_DEVID_PCMCIA, + .irq = { + IRQ_S0_READY_NINT, + IRQ_S0_CD_VALID, + IRQ_S0_BVD1_STSCHG, + IRQ_S1_READY_NINT, + IRQ_S1_CD_VALID, + IRQ_S1_BVD1_STSCHG, + }, }, -}; - -static struct sa1111_dev *devs[] = { - &usb_dev, - &sac_dev, - &ssp_dev, - &kbd_dev, - &mse_dev, - &pcmcia_dev, -}; - -static unsigned int dev_offset[] = { - SA1111_USB, - 0x0600, - 0x0800, - SA1111_KBD, - SA1111_MSE, - 0x1800, }; /* @@ -372,44 +359,45 @@ static struct irqchip sa1111_high_chip = .wake = sa1111_wake_highirq, }; -static void __init sa1111_init_irq(struct sa1111_dev *sadev) +static void sa1111_setup_irq(struct sa1111 *sachip) { + void *irqbase = sachip->base + SA1111_INTC; unsigned int irq; /* * We're guaranteed that this region hasn't been taken. 
*/ - request_mem_region(sadev->res.start, 512, "irqs"); + request_mem_region(sachip->phys + SA1111_INTC, 512, "irq"); /* disable all IRQs */ - sa1111_writel(0, sadev->mapbase + SA1111_INTEN0); - sa1111_writel(0, sadev->mapbase + SA1111_INTEN1); - sa1111_writel(0, sadev->mapbase + SA1111_WAKEEN0); - sa1111_writel(0, sadev->mapbase + SA1111_WAKEEN1); + sa1111_writel(0, irqbase + SA1111_INTEN0); + sa1111_writel(0, irqbase + SA1111_INTEN1); + sa1111_writel(0, irqbase + SA1111_WAKEEN0); + sa1111_writel(0, irqbase + SA1111_WAKEEN1); /* * detect on rising edge. Note: Feb 2001 Errata for SA1111 * specifies that S0ReadyInt and S1ReadyInt should be '1'. */ - sa1111_writel(0, sadev->mapbase + SA1111_INTPOL0); + sa1111_writel(0, irqbase + SA1111_INTPOL0); sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) | SA1111_IRQMASK_HI(IRQ_S1_READY_NINT), - sadev->mapbase + SA1111_INTPOL1); + irqbase + SA1111_INTPOL1); /* clear all IRQs */ - sa1111_writel(~0, sadev->mapbase + SA1111_INTSTATCLR0); - sa1111_writel(~0, sadev->mapbase + SA1111_INTSTATCLR1); + sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0); + sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1); for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) { set_irq_chip(irq, &sa1111_low_chip); - set_irq_chipdata(irq, sadev->mapbase); + set_irq_chipdata(irq, irqbase); set_irq_handler(irq, do_edge_IRQ); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) { set_irq_chip(irq, &sa1111_high_chip); - set_irq_chipdata(irq, sadev->mapbase); + set_irq_chipdata(irq, irqbase); set_irq_handler(irq, do_edge_IRQ); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } @@ -417,9 +405,9 @@ static void __init sa1111_init_irq(struc /* * Register SA1111 interrupt */ - set_irq_type(sadev->irq[0], IRQT_RISING); - set_irq_data(sadev->irq[0], sadev->mapbase); - set_irq_chained_handler(sadev->irq[0], sa1111_irq_handler); + set_irq_type(sachip->irq, IRQT_RISING); + set_irq_data(sachip->irq, irqbase); + set_irq_chained_handler(sachip->irq, sa1111_irq_handler); } /* @@ -529,37 +517,64 @@ sa1111_configure_smc(struct sa1111 *sach #endif -static void +static void sa1111_dev_release(struct device *_dev) +{ + struct sa1111_dev *dev = SA1111_DEV(_dev); + + release_resource(&dev->res); + kfree(dev); +} + +static int sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, - struct sa1111_dev *sadev, unsigned int offset) + struct sa1111_dev_info *info) { - snprintf(sadev->dev.bus_id, sizeof(sadev->dev.bus_id), - "%4.4x", offset); + struct sa1111_dev *dev; + int ret; + + dev = kmalloc(sizeof(struct sa1111_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto out; + } + memset(dev, 0, sizeof(struct sa1111_dev)); + + snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id), + "%4.4lx", info->offset); /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. 
*/ if (sachip->dev->dma_mask) { - sadev->dma_mask = *sachip->dev->dma_mask; - sadev->dev.dma_mask = &sadev->dma_mask; + dev->dma_mask = *sachip->dev->dma_mask; + dev->dev.dma_mask = &dev->dma_mask; } - sadev->dev.parent = sachip->dev; - sadev->dev.bus = &sa1111_bus_type; - sadev->res.start = sachip->phys + offset; - sadev->res.end = sadev->res.start + 511; - sadev->res.name = sadev->dev.bus_id; - sadev->res.flags = IORESOURCE_MEM; - sadev->mapbase = sachip->base + offset; + dev->devid = info->devid; + dev->dev.parent = sachip->dev; + dev->dev.bus = &sa1111_bus_type; + dev->dev.release = sa1111_dev_release; + dev->res.start = sachip->phys + info->offset; + dev->res.end = dev->res.start + 511; + dev->res.name = dev->dev.bus_id; + dev->res.flags = IORESOURCE_MEM; + dev->mapbase = sachip->base + info->offset; - if (request_resource(parent, &sadev->res)) { + ret = request_resource(parent, &dev->res); + if (ret) { printk("SA1111: failed to allocate resource for %s\n", - sadev->res.name); - return; + dev->res.name); + goto out; } - device_register(&sadev->dev); + ret = device_register(&dev->dev); + if (ret) { + release_resource(&dev->res); + out: + kfree(dev); + } + return ret; } /** @@ -655,11 +670,8 @@ __sa1111_probe(struct device *me, struct * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. */ - if (irq != NO_IRQ) { - int_dev.irq[0] = irq; - sa1111_init_one_child(sachip, mem, &int_dev, SA1111_INTC); - sa1111_init_irq(&int_dev); - } + if (sachip->irq != NO_IRQ) + sa1111_setup_irq(sachip); g_sa1111 = sachip; @@ -670,9 +682,9 @@ __sa1111_probe(struct device *me, struct else has_devs &= ~(1 << 1); - for (i = 0; i < ARRAY_SIZE(devs); i++) + for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++) if (has_devs & (1 << i)) - sa1111_init_one_child(sachip, mem, devs[i], dev_offset[i]); + sa1111_init_one_child(sachip, mem, &sa1111_devices[i]); return 0; @@ -685,11 +697,26 @@ __sa1111_probe(struct device *me, struct static void __sa1111_remove(struct sa1111 *sachip) { - int i; + struct list_head *l, *n; + void *irqbase = sachip->base + SA1111_INTC; + + list_for_each_safe(l, n, &sachip->dev->children) { + struct device *d = list_to_dev(l); - for (i = 0; i < ARRAY_SIZE(devs); i++) { - put_device(&devs[i]->dev); - release_resource(&devs[i]->res); + device_unregister(d); + } + + /* disable all IRQs */ + sa1111_writel(0, irqbase + SA1111_INTEN0); + sa1111_writel(0, irqbase + SA1111_INTEN1); + sa1111_writel(0, irqbase + SA1111_WAKEEN0); + sa1111_writel(0, irqbase + SA1111_WAKEEN1); + + if (sachip->irq != NO_IRQ) { + set_irq_chained_handler(sachip->irq, NULL); + set_irq_data(sachip->irq, NULL); + + release_mem_region(sachip->phys + SA1111_INTC, 512); } iounmap(sachip->base); @@ -958,17 +985,6 @@ static struct device_driver sa1111_devic }; /* - * Register the SA1111 driver with LDM. 
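The rewritten sa1111_init_one_child() above allocates each struct sa1111_dev with kmalloc() instead of using the old static objects, which is why the hunk also adds sa1111_dev_release(): once a struct device has been registered, its memory may only be freed from the release() callback, after the driver core drops the last reference. A minimal sketch of the same pattern, assuming hypothetical names (child_dev, child_create) and the 2.6.0-era driver model:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical wrapper, shaped like struct sa1111_dev. */
struct child_dev {
	struct device	dev;
	struct resource	res;
};

static void child_release(struct device *_dev)
{
	struct child_dev *cdev = container_of(_dev, struct child_dev, dev);

	/* Runs only after the last reference to cdev->dev is gone. */
	release_resource(&cdev->res);
	kfree(cdev);
}

static int child_create(struct device *parent, struct resource *root,
			unsigned long start)
{
	struct child_dev *cdev;
	int ret;

	cdev = kmalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;
	memset(cdev, 0, sizeof(*cdev));		/* no kzalloc() yet in 2.6.0 */

	snprintf(cdev->dev.bus_id, sizeof(cdev->dev.bus_id), "%8.8lx", start);
	cdev->dev.parent  = parent;
	cdev->dev.release = child_release;
	cdev->res.start   = start;
	cdev->res.end     = start + 511;
	cdev->res.name    = cdev->dev.bus_id;
	cdev->res.flags   = IORESOURCE_MEM;

	ret = request_resource(root, &cdev->res);
	if (ret) {
		kfree(cdev);
		return ret;
	}

	ret = device_register(&cdev->dev);
	if (ret) {
		/* Mirror the patch: back out the resource and free directly. */
		release_resource(&cdev->res);
		kfree(cdev);
	}
	return ret;
}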
- */ -static int sa1111_driver_init(void) -{ - driver_register(&sa1111_device_driver); - return 0; -} - -arch_initcall(sa1111_driver_init); - -/* * Get the parent device driver (us) structure * from a child function device */ @@ -1180,13 +1196,6 @@ struct bus_type sa1111_bus_type = { .resume = sa1111_bus_resume, }; -static int sa1111_rab_bus_init(void) -{ - return bus_register(&sa1111_bus_type); -} - -postcore_initcall(sa1111_rab_bus_init); - int sa1111_driver_register(struct sa1111_driver *driver) { driver->drv.probe = sa1111_bus_probe; @@ -1200,6 +1209,26 @@ void sa1111_driver_unregister(struct sa1 driver_unregister(&driver->drv); } +static int __init sa1111_init(void) +{ + int ret = bus_register(&sa1111_bus_type); + if (ret == 0) + driver_register(&sa1111_device_driver); + return ret; +} + +static void __exit sa1111_exit(void) +{ + driver_unregister(&sa1111_device_driver); + bus_unregister(&sa1111_bus_type); +} + +module_init(sa1111_init); +module_exit(sa1111_exit); + +MODULE_DESCRIPTION("Intel Corporation SA1111 core driver"); +MODULE_LICENSE("GPL"); + EXPORT_SYMBOL(sa1111_check_dma_bug); EXPORT_SYMBOL(sa1111_select_audio_mode); EXPORT_SYMBOL(sa1111_set_audio_rate); --- linux-2.6.0-test5/arch/arm/common/sa1111-pcipool.c 2003-08-08 22:55:10.000000000 -0700 +++ 25/arch/arm/common/sa1111-pcipool.c 2003-09-18 21:02:00.000000000 -0700 @@ -51,7 +51,7 @@ struct pci_page { /* cacheable header fo static inline const char *slot_name(const struct pci_pool *pool) { - const struct pci_dev *pdev = pool->dev; + struct pci_dev *pdev = (struct pci_dev *)pool->dev; if (pdev == 0) return "[0]"; --- linux-2.6.0-test5/arch/arm/Kconfig 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/Kconfig 2003-09-18 21:02:00.000000000 -0700 @@ -213,198 +213,7 @@ config FORCE_MAX_ZONEORDER depends on SA1111 default "9" -comment "Processor Type" - -# Figure out whether this system uses 26-bit or 32-bit CPUs. -config CPU_32 - bool - default y - -# Select CPU types depending on the architecture selected. This selects -# which CPUs we support in the kernel image, and the compiler instruction -# optimiser behaviour. -# ARM610 -config CPU_ARM610 - bool "Support ARM610 processor" - depends on ARCH_RPC - help - The ARM610 is the successor to the ARM3 processor - and was produced by VLSI Technology Inc. - - Say Y if you want support for the ARM610 processor. - Otherwise, say N. - -# ARM710 -config CPU_ARM710 - bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC - default y if ARCH_CLPS7500 - help - A 32-bit RISC microprocessor based on the ARM7 processor core - designed by Advanced RISC Machines Ltd. The ARM710 is the - successor to the ARM610 processor. It was released in - July 1994 by VLSI Technology Inc. - - Say Y if you want support for the ARM710 processor. - Otherwise, say N. - -# ARM720T -config CPU_ARM720T - bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR - default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 - help - A 32-bit RISC processor with 8kByte Cache, Write Buffer and - MMU built around an ARM7TDMI core. - - Say Y if you want support for the ARM720T processor. - Otherwise, say N. - -# ARM920T -config CPU_ARM920T - bool "Support ARM920T processor" - depends on ARCH_INTEGRATOR - help - The ARM920T is licensed to be produced by numerous vendors, - and is used in the Maverick EP9312. More information at - . - - Say Y if you want support for the ARM920T processor. - Otherwise, say N. 
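Further up in the same sa1111.c hunk, the separate arch_initcall(sa1111_driver_init) and postcore_initcall(sa1111_rab_bus_init) hooks are replaced by a single module_init()/module_exit() pair that registers the bus first and only then the core driver. A sketch of that ordering with hypothetical names; unlike the new sa1111_init(), which ignores the driver_register() return value, this version rolls the bus back on failure:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>

static struct bus_type example_bus_type = {
	.name	= "example",
};

static struct device_driver example_driver = {
	.name	= "example-core",
	.bus	= &example_bus_type,
};

static int __init example_init(void)
{
	int ret;

	/* The bus must exist before any driver or device is added to it. */
	ret = bus_register(&example_bus_type);
	if (ret)
		return ret;

	ret = driver_register(&example_driver);
	if (ret)
		bus_unregister(&example_bus_type);
	return ret;
}

static void __exit example_exit(void)
{
	/* Tear down in the reverse order of registration. */
	driver_unregister(&example_driver);
	bus_unregister(&example_bus_type);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");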
- -# ARM922T -config CPU_ARM922T - bool - depends on ARCH_CAMELOT - default y - help - The ARM922T is a version of the ARM920T, but with smaller - instruction and data caches. It is used in Altera's - Excalibur XA device family. - - Say Y if you want support for the ARM922T processor. - Otherwise, say N. - -# ARM926T -config CPU_ARM926T - bool "Support ARM926T processor" - depends on ARCH_INTEGRATOR - help - This is a variant of the ARM920. It has slightly different - instruction sequences for cache and TLB operations. Curiously, - there is no documentation on it at the ARM corporate website. - - Say Y if you want support for the ARM926T processor. - Otherwise, say N. - -# ARM1020 -config CPU_ARM1020 - bool "Support ARM1020 processor" - depends on ARCH_INTEGRATOR - help - The ARM1020 is the cached version of the ARM10 processor, - with an addition of a floating-point unit. - - Say Y if you want support for the ARM1020 processor. - Otherwise, say N. - -# SA110 -config CPU_SA110 - bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && !ARCH_ANAKIN && ARCH_RPC - default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_ANAKIN - help - The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and - is available at five speeds ranging from 100 MHz to 233 MHz. - More information is available at - . - - Say Y if you want support for the SA-110 processor. - Otherwise, say N. - -# SA1100 -config CPU_SA1100 - bool - depends on ARCH_SA1100 - default y - -# XScale -config CPU_XSCALE - bool - depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA - default y - -# Figure out what processor architecture version we should be using. -# This defines the compiler instruction set which depends on the machine type. -config CPU_32v3 - bool - depends on ARCH_RPC || ARCH_CLPS7500 - default y - -config CPU_32v4 - bool - depends on ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_CLPS711X || ARCH_INTEGRATOR || ARCH_SA1100 || ARCH_L7200 || ARCH_ANAKIN || ARCH_CAMELOT - default y - -config CPU_32v5 - bool - depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA - default y - -comment "Processor Features" - -config ARM_THUMB - bool "Support Thumb instructions (EXPERIMENTAL)" - depends on (CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 || CPU_XSCALE) && EXPERIMENTAL - help - Say Y if you want to have kernel support for ARM Thumb instructions, - fault handlers, and system calls. - - The Thumb instruction set is a compressed form of the standard ARM - instruction set resulting in smaller binaries at the expense of - slightly less efficient code. - - If you don't know what this all is, saying Y is a safe choice. - -config CPU_BIG_ENDIAN - bool "Build big-endian kernel" - depends on ARCH_SUPPORTS_BIG_ENDIAN - help - Say Y if you plan on running a kernel in big-endian mode. - Note that your board must be properly built and your board - port must properly enable and big-endian related features - of your chipset/board/processor. - -config CPU_ICACHE_DISABLE - bool "Disable I-Cache" - depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 - help - Say Y here to disable the processor instruction cache. Unless - you have a reason not to or are unsure, say N. - -config CPU_DCACHE_DISABLE - bool "Disable D-Cache" - depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 - help - Say Y here to disable the processor data cache. 
Unless - you have a reason not to or are unsure, say N. - -config CPU_DCACHE_WRITETHROUGH - bool "Force write through D-cache" - depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020) && !CPU_DISABLE_DCACHE - help - Say Y here to use the data cache in writethough mode. Unless you - specifically require this or are unsure, say N. - -config CPU_CACHE_ROUND_ROBIN - bool "Round robin I and D cache replacement algorithm" - depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE) - help - Say Y here to use the predictable round-robin cache replacement - policy. Unless you specifically require this or are unsure, say N. - -config CPU_BPREDICT_DISABLE - bool "Disable branch prediction" - depends on CPU_ARM1020 - help - Say Y here to disable branch prediction. If unsure, say N. +source arch/arm/mm/Kconfig # bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER config XSCALE_PMU @@ -842,11 +651,6 @@ source "drivers/input/Kconfig" source "drivers/char/Kconfig" -config KBDMOUSE - bool - depends on ARCH_ACORN && BUSMOUSE=y && !ARCH_RPC - default y - source "drivers/media/Kconfig" source "fs/Kconfig" --- linux-2.6.0-test5/arch/arm/kernel/asm-offsets.c 2003-08-08 22:55:10.000000000 -0700 +++ 25/arch/arm/kernel/asm-offsets.c 2003-09-18 21:02:00.000000000 -0700 @@ -15,9 +15,6 @@ #include #include -#include -#include - /* * Make sure that the compiler and target are compatible. */ @@ -58,19 +55,6 @@ int main(void) BLANK(); DEFINE(VM_EXEC, VM_EXEC); BLANK(); - DEFINE(HPTE_TYPE_SMALL, PTE_TYPE_SMALL); - DEFINE(HPTE_AP_READ, PTE_AP_READ); - DEFINE(HPTE_AP_WRITE, PTE_AP_WRITE); - BLANK(); - DEFINE(LPTE_PRESENT, L_PTE_PRESENT); - DEFINE(LPTE_YOUNG, L_PTE_YOUNG); - DEFINE(LPTE_BUFFERABLE, L_PTE_BUFFERABLE); - DEFINE(LPTE_CACHEABLE, L_PTE_CACHEABLE); - DEFINE(LPTE_USER, L_PTE_USER); - DEFINE(LPTE_WRITE, L_PTE_WRITE); - DEFINE(LPTE_EXEC, L_PTE_EXEC); - DEFINE(LPTE_DIRTY, L_PTE_DIRTY); - BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); DEFINE(SYS_ERROR0, 0x9f0000); --- linux-2.6.0-test5/arch/arm/kernel/bios32.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/kernel/bios32.c 2003-09-18 21:02:00.000000000 -0700 @@ -35,18 +35,17 @@ void pcibios_report_status(u_int status_ continue; pci_read_config_word(dev, PCI_STATUS, &status); + if (status == 0xffff) + continue; - status &= status_mask; - if (status == 0) + if ((status & status_mask) == 0) continue; /* clear the status errors */ - pci_write_config_word(dev, PCI_STATUS, status); + pci_write_config_word(dev, PCI_STATUS, status & status_mask); if (warn) - printk("(%02x:%02x.%d: %04X) ", dev->bus->number, - PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), - status); + printk("(%s: %04X) ", pci_name(dev), status); } } --- linux-2.6.0-test5/arch/arm/kernel/setup.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/kernel/setup.c 2003-09-18 21:02:00.000000000 -0700 @@ -798,7 +798,7 @@ static int c_show(struct seq_file *m, vo seq_printf(m, "Cache type\t: %s\n" "Cache clean\t: %s\n" "Cache lockdown\t: %s\n" - "Cache unified\t: %s\n", + "Cache format\t: %s\n", cache_types[CACHE_TYPE(cache_info)], cache_clean[CACHE_TYPE(cache_info)], cache_lockdown[CACHE_TYPE(cache_info)], --- linux-2.6.0-test5/arch/arm/mach-integrator/core.c 2003-06-22 12:04:43.000000000 -0700 +++ 25/arch/arm/mach-integrator/core.c 2003-09-18 21:02:00.000000000 -0700 @@ -34,6 +34,8 @@ #include #include +#include + #include #include #include @@ -46,6 +48,7 @@ * just for now). 
*/ #define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE) +#define VA_SC_BASE IO_ADDRESS(INTEGRATOR_SC_BASE) #define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_IC_OFFSET /* @@ -66,7 +69,7 @@ * f1a00000 1a000000 Debug LEDs * f1b00000 1b000000 GPIO */ - + static struct map_desc integrator_io_desc[] __initdata = { { IO_ADDRESS(INTEGRATOR_HDR_BASE), INTEGRATOR_HDR_BASE, SZ_4K, MT_DEVICE }, { IO_ADDRESS(INTEGRATOR_SC_BASE), INTEGRATOR_SC_BASE, SZ_4K, MT_DEVICE }, @@ -89,7 +92,7 @@ static void __init integrator_map_io(voi iotable_init(integrator_io_desc, ARRAY_SIZE(integrator_io_desc)); } -#define ALLPCI ( (1 << IRQ_PCIINT0) | (1 << IRQ_PCIINT1) | (1 << IRQ_PCIINT2) | (1 << IRQ_PCIINT3) ) +#define ALLPCI ( (1 << IRQ_PCIINT0) | (1 << IRQ_PCIINT1) | (1 << IRQ_PCIINT2) | (1 << IRQ_PCIINT3) ) static void sc_mask_irq(unsigned int irq) { @@ -161,6 +164,7 @@ static struct amba_device *amba_devs[] _ static int __init register_devices(void) { + unsigned long sc_dec; int i; for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { @@ -169,6 +173,28 @@ static int __init register_devices(void) amba_device_register(d, &iomem_resource); } + sc_dec = readl(VA_SC_BASE + INTEGRATOR_SC_DEC_OFFSET); + for (i = 0; i < 4; i++) { + struct lm_device *lmdev; + + if ((sc_dec & (16 << i)) == 0) + continue; + + lmdev = kmalloc(sizeof(struct lm_device), GFP_KERNEL); + if (!lmdev) + continue; + + memset(lmdev, 0, sizeof(struct lm_device)); + + lmdev->resource.start = 0xc0000000 + 0x10000000 * i; + lmdev->resource.end = lmdev->resource.start + 0x0fffffff; + lmdev->resource.flags = IORESOURCE_MEM; + lmdev->irq = IRQ_EXPINT0 + i; + lmdev->id = i; + + lm_device_register(lmdev); + } + return 0; } --- linux-2.6.0-test5/arch/arm/mach-integrator/impd1.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mach-integrator/impd1.c 2003-09-18 21:02:00.000000000 -0700 @@ -10,7 +10,7 @@ * This file provides the core support for the IM-PD1 module. * * Module / boot parameters. 
- * id=n impd1.id=n - set the logic module position in stack to 'n' + * lmid=n impd1.lmid=n - set the logic module position in stack to 'n' */ #include #include @@ -21,17 +21,15 @@ #include #include #include +#include #include #include static int module_id; -module_param_named(lmid, module_id, int, 0); +module_param_named(lmid, module_id, int, 0444); MODULE_PARM_DESC(lmid, "logic module stack position"); -#define ROM_OFFSET 0x0fffff00 -#define ROM_SIZE 256 - struct impd1_module { void *base; }; @@ -142,17 +140,15 @@ static struct impd1_device impd1_devs[] } }; -static int impd1_probe(struct device *dev) +static int impd1_probe(struct lm_device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *res = &pdev->resource[0]; struct impd1_module *impd1; int i, ret; - if (pdev->id != module_id) + if (dev->id != module_id) return -EINVAL; - if (!request_mem_region(res->start, SZ_4K, "LM registers")) + if (!request_mem_region(dev->resource.start, SZ_4K, "LM registers")) return -EBUSY; impd1 = kmalloc(sizeof(struct impd1_module), GFP_KERNEL); @@ -162,22 +158,22 @@ static int impd1_probe(struct device *de } memset(impd1, 0, sizeof(struct impd1_module)); - impd1->base = ioremap(res->start, SZ_4K); + impd1->base = ioremap(dev->resource.start, SZ_4K); if (!impd1->base) { ret = -ENOMEM; goto free_impd1; } - dev_set_drvdata(dev, impd1); + lm_set_drvdata(dev, impd1); - printk("IM-PD1 found at 0x%08lx\n", res->start); + printk("IM-PD1 found at 0x%08lx\n", dev->resource.start); for (i = 0; i < ARRAY_SIZE(impd1_devs); i++) { struct impd1_device *idev = impd1_devs + i; struct amba_device *d; unsigned long pc_base; - pc_base = res->start + idev->offset; + pc_base = dev->resource.start + idev->offset; d = kmalloc(sizeof(struct amba_device), GFP_KERNEL); if (!d) @@ -186,16 +182,16 @@ static int impd1_probe(struct device *de memset(d, 0, sizeof(struct amba_device)); snprintf(d->dev.bus_id, sizeof(d->dev.bus_id), - "lm%x:%5.5lx", pdev->id, idev->offset >> 12); + "lm%x:%5.5lx", dev->id, idev->offset >> 12); - d->dev.parent = &pdev->dev; - d->res.start = res->start + idev->offset; + d->dev.parent = &dev->dev; + d->res.start = dev->resource.start + idev->offset; d->res.end = d->res.start + SZ_4K - 1; d->res.flags = IORESOURCE_MEM; - d->irq = pdev->resource[1].start; + d->irq = dev->irq; d->periphid = idev->id; - ret = amba_device_register(d, res); + ret = amba_device_register(d, &dev->resource); if (ret) { printk("unable to register device %s: %d\n", d->dev.bus_id, ret); @@ -211,47 +207,44 @@ static int impd1_probe(struct device *de if (impd1) kfree(impd1); release_lm: - release_mem_region(res->start, SZ_4K); + release_mem_region(dev->resource.start, SZ_4K); return ret; } -static int impd1_remove(struct device *dev) +static void impd1_remove(struct lm_device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *res = &pdev->resource[0]; - struct impd1_module *impd1 = dev_get_drvdata(dev); + struct impd1_module *impd1 = lm_get_drvdata(dev); struct list_head *l, *n; - list_for_each_safe(l, n, &dev->children) { + list_for_each_safe(l, n, &dev->dev.children) { struct device *d = list_to_dev(l); device_unregister(d); } - dev_set_drvdata(dev, NULL); + lm_set_drvdata(dev, NULL); iounmap(impd1->base); kfree(impd1); - release_mem_region(res->start, SZ_4K); - - return 0; + release_mem_region(dev->resource.start, SZ_4K); } -static struct device_driver impd1_driver = { - .name = "lm", - .bus = &platform_bus_type, +static struct lm_driver impd1_driver = { + .drv = { + 
.name = "impd1", + }, .probe = impd1_probe, .remove = impd1_remove, }; static int __init impd1_init(void) { - return driver_register(&impd1_driver); + return lm_driver_register(&impd1_driver); } static void __exit impd1_exit(void) { - driver_unregister(&impd1_driver); + lm_driver_unregister(&impd1_driver); } module_init(impd1_init); --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mach-integrator/lm.c 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,92 @@ +/* + * linux/arch/arm/mach-integrator/lm.c + * + * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + +#include + +#define to_lm_device(d) container_of(d, struct lm_device, dev) +#define to_lm_driver(d) container_of(d, struct lm_driver, drv) + +static int lm_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type lm_bustype = { + .name = "logicmodule", + .match = lm_match, +// .suspend = lm_suspend, +// .resume = lm_resume, +}; + +static int __init lm_init(void) +{ + return bus_register(&lm_bustype); +} + +postcore_initcall(lm_init); + +static int lm_bus_probe(struct device *dev) +{ + struct lm_device *lmdev = to_lm_device(dev); + struct lm_driver *lmdrv = to_lm_driver(dev->driver); + + return lmdrv->probe(lmdev); +} + +static int lm_bus_remove(struct device *dev) +{ + struct lm_device *lmdev = to_lm_device(dev); + struct lm_driver *lmdrv = to_lm_driver(dev->driver); + + lmdrv->remove(lmdev); + return 0; +} + +int lm_driver_register(struct lm_driver *drv) +{ + drv->drv.bus = &lm_bustype; + drv->drv.probe = lm_bus_probe; + drv->drv.remove = lm_bus_remove; + + return driver_register(&drv->drv); +} + +void lm_driver_unregister(struct lm_driver *drv) +{ + driver_unregister(&drv->drv); +} + +static void lm_device_release(struct device *dev) +{ + struct lm_device *d = to_lm_device(dev); + + kfree(d); +} + +int lm_device_register(struct lm_device *dev) +{ + int ret; + + dev->dev.release = lm_device_release; + dev->dev.bus = &lm_bustype; + + snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id), "lm%d", dev->id); + dev->resource.name = dev->dev.bus_id; + + ret = request_resource(&iomem_resource, &dev->resource); + if (ret == 0) { + ret = device_register(&dev->dev); + if (ret) + release_resource(&dev->resource); + } + return ret; +} --- linux-2.6.0-test5/arch/arm/mach-integrator/Makefile 2003-08-22 19:23:40.000000000 -0700 +++ 25/arch/arm/mach-integrator/Makefile 2003-09-18 21:02:00.000000000 -0700 @@ -4,7 +4,7 @@ # Object file lists. 
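The new lm.c above deliberately keeps lm_match() trivial: it returns 1, so every registered lm_driver is offered every logic module and the driver's probe() has to filter for itself, exactly as impd1_probe() does by comparing dev->id with its lmid parameter. A sketch of a client of this bus, assuming hypothetical names and that struct lm_device/struct lm_driver come from the lm.h header added alongside lm.c (its exact path is not visible in the hunk above):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "lm.h"			/* assumed: struct lm_device, struct lm_driver */

static int example_lm_probe(struct lm_device *dev)
{
	/* lm_match() accepts everything, so reject modules we don't own. */
	if (dev->id != 0)
		return -ENODEV;

	printk(KERN_INFO "example: logic module at 0x%08lx, irq %d\n",
	       dev->resource.start, dev->irq);
	return 0;
}

static void example_lm_remove(struct lm_device *dev)
{
	/* Nothing was claimed in probe(), so nothing to release here. */
}

static struct lm_driver example_lm_driver = {
	.drv	= {
		.name	= "example-lm",
	},
	.probe	= example_lm_probe,
	.remove	= example_lm_remove,
};

static int __init example_lm_init(void)
{
	return lm_driver_register(&example_lm_driver);
}

static void __exit example_lm_exit(void)
{
	lm_driver_unregister(&example_lm_driver);
}

module_init(example_lm_init);
module_exit(example_lm_exit);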
-obj-y := core.o time.o +obj-y := core.o lm.o time.o obj-$(CONFIG_LEDS) += leds.o obj-$(CONFIG_PCI) += pci_v3.o pci.o --- linux-2.6.0-test5/arch/arm/mach-sa1100/simpad.c 2003-06-14 12:17:57.000000000 -0700 +++ 25/arch/arm/mach-sa1100/simpad.c 2003-09-18 21:02:00.000000000 -0700 @@ -9,24 +9,37 @@ #include #include #include +#include +#include #include #include +#include #include #include #include +#include +#include + #include +#include +#include #include "generic.h" long cs3_shadow; -long get_cs3_shadow() +long get_cs3_shadow(void) { return cs3_shadow; } +void set_cs3(long value) +{ + *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow = value; +} + void set_cs3_bit(int value) { cs3_shadow |= value; @@ -39,10 +52,15 @@ void clear_cs3_bit(int value) *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow; } +EXPORT_SYMBOL(set_cs3_bit); +EXPORT_SYMBOL(clear_cs3_bit); + static struct map_desc simpad_io_desc[] __initdata = { - /* virtual physical length type */ - { 0xf2800000, 0x4b800000, 0x00800000, MT_DEVICE }, /* MQ200 */ - { 0xf1000000, 0x18000000, 0x00100000, MT_DEVICE } /* Paules CS3, write only */ + /* virtual physical length type */ + /* MQ200 */ + { 0xf2800000, 0x4b800000, 0x00800000, MT_DEVICE }, + /* Paules CS3, write only */ + { 0xf1000000, 0x18000000, 0x00100000, MT_DEVICE }, }; @@ -50,32 +68,52 @@ static void simpad_uart_pm(struct uart_p { if (port->mapbase == (u_int)&Ser1UTCR0) { if (state) + { clear_cs3_bit(RS232_ON); - else + clear_cs3_bit(DECT_POWER_ON); + }else + { set_cs3_bit(RS232_ON); + set_cs3_bit(DECT_POWER_ON); + } } } static struct sa1100_port_fns simpad_port_fns __initdata = { - .pm = simpad_uart_pm, + .pm = simpad_uart_pm, }; static void __init simpad_map_io(void) { sa1100_map_io(); + iotable_init(simpad_io_desc, ARRAY_SIZE(simpad_io_desc)); - PSPR = 0xc0008000; - GPDR &= ~GPIO_GPIO0; - cs3_shadow = (EN1 | EN0 | LED2_ON | DISPLAY_ON | RS232_ON | - ENABLE_5V | RESET_SIMCARD); - *(CS3BUSTYPE *)(CS3_BASE) = cs3_shadow; + set_cs3_bit (EN1 | EN0 | LED2_ON | DISPLAY_ON | RS232_ON | + ENABLE_5V | RESET_SIMCARD | DECT_POWER_ON); + + + sa1100_register_uart_fns(&simpad_port_fns); + sa1100_register_uart(0, 3); /* serial interface */ + sa1100_register_uart(1, 1); /* DECT */ + + // Reassign UART 1 pins + GAFR |= GPIO_UART_TXD | GPIO_UART_RXD; + GPDR |= GPIO_UART_TXD | GPIO_LDD13 | GPIO_LDD15; + GPDR &= ~GPIO_UART_RXD; + PPAR |= PPAR_UPR; + + /* + * Set up registers for sleep mode. + */ + + + PWER = PWER_GPIO0| PWER_RTC; + PGSR = 0x818; + PCFR = 0; + PSDR = 0; - //It is only possible to register 3 UART in serial_sa1100.c - sa1100_register_uart(0, 3); - sa1100_register_uart(1, 1); - set_irq_type(IRQ_GPIO_UCB1300_IRQ, IRQT_RISING); } #ifdef CONFIG_PROC_FS @@ -105,7 +143,7 @@ static int proc_cs3_read(char *page, cha char *p = page; int len, i; - p += sprintf(p, "Chipselect3 : %x\n", cs3_shadow); + p += sprintf(p, "Chipselect3 : %x\n", (uint)cs3_shadow); for (i = 0; i <= 15; i++) { if(cs3_shadow & (1<read_proc = proc_cs3_read; + proc_cs3->write_proc = (void*)proc_cs3_write; + } +#endif + + + return 0; } arch_initcall(cs3_init); -#endif // CONFIG_PROC_FS +static void simpad_power_off(void) +{ + local_irq_disable(); // was cli + set_cs3(0x800); /* only SD_MEDIAQ */ + + /* disable internal oscillator, float CS lines */ + PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS); + /* enable wake-up on GPIO0 (Assabet...) */ + PWER = GFER = GRER = 1; + /* + * set scratchpad to zero, just in case it is used as a + * restart address by the bootloader. 
+ */ + PSPR = 0; + PGSR = 0; + /* enter sleep mode */ + PMCR = PMCR_SF; + while(1); + + local_irq_enable(); /* we won't ever call it */ + + +} + +static int __init simpad_init(void) +{ + set_power_off_handler( simpad_power_off ); + return 0; +} + +arch_initcall(simpad_init); + MACHINE_START(SIMPAD, "Simpad") MAINTAINER("Juergen Messerer") BOOT_MEM(0xc0000000, 0x80000000, 0xf8000000) + BOOT_PARAMS(0xc0000100) MAPIO(simpad_map_io) INITIRQ(sa1100_init_irq) MACHINE_END --- linux-2.6.0-test5/arch/arm/mm/abort-ev5tej.S 2003-06-14 12:18:22.000000000 -0700 +++ /dev/null 2002-08-30 16:31:37.000000000 -0700 @@ -1,36 +0,0 @@ -#include -#include -/* - * Function: v5tej_early_abort - * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR - * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers - * - * Purpose : obtain information about current aborted instruction. - * Note: we read user space. This means we might cause a data - * abort here if the I-TLB and D-TLB aren't seeing the same - * picture. Unfortunately, this does happen. We live with it. - */ - .align 5 -ENTRY(v5tej_early_abort) - mrc p15, 0, r1, c5, c0, 0 @ get FSR - mrc p15, 0, r0, c6, c0, 0 @ get FAR - bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR - tst r3, #PSR_J_BIT - orrne r1, r1, #1 << 11 @ always assume write - bne 1f - tst r3, #PSR_T_BIT - ldrneh r3, [r2] @ read aborted thumb instruction - ldreq r3, [r2] @ read aborted ARM instruction - movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20 - tst r3, #1 << 20 @ L = 1 -> write - orreq r1, r1, #1 << 11 @ yes. -1: mov pc, lr - - --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/abort-ev5tj.S 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,36 @@ +#include +#include +/* + * Function: v5tj_early_abort + * + * Params : r2 = address of aborted instruction + * : r3 = saved SPSR + * + * Returns : r0 = address of abort + * : r1 = FSR, bit 11 = write + * : r2-r8 = corrupted + * : r9 = preserved + * : sp = pointer to registers + * + * Purpose : obtain information about current aborted instruction. + * Note: we read user space. This means we might cause a data + * abort here if the I-TLB and D-TLB aren't seeing the same + * picture. Unfortunately, this does happen. We live with it. + */ + .align 5 +ENTRY(v5tj_early_abort) + mrc p15, 0, r1, c5, c0, 0 @ get FSR + mrc p15, 0, r0, c6, c0, 0 @ get FAR + bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR + tst r3, #PSR_J_BIT @ Java? + orrne r1, r1, #1 << 11 @ always assume write + movne pc, lr + tst r3, #PSR_T_BIT @ Thumb? + ldrneh r3, [r2] @ read aborted thumb instruction + ldreq r3, [r2] @ read aborted ARM instruction + movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20 + tst r3, #1 << 20 @ L = 0 -> write + orreq r1, r1, #1 << 11 @ yes. + mov pc, lr + + --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/abort-ev5t.S 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,31 @@ +#include +#include +/* + * Function: v5t_early_abort + * + * Params : r2 = address of aborted instruction + * : r3 = saved SPSR + * + * Returns : r0 = address of abort + * : r1 = FSR, bit 11 = write + * : r2-r8 = corrupted + * : r9 = preserved + * : sp = pointer to registers + * + * Purpose : obtain information about current aborted instruction. + * Note: we read user space. This means we might cause a data + * abort here if the I-TLB and D-TLB aren't seeing the same + * picture. 
Unfortunately, this does happen. We live with it. + */ + .align 5 +ENTRY(v5t_early_abort) + mrc p15, 0, r1, c5, c0, 0 @ get FSR + mrc p15, 0, r0, c6, c0, 0 @ get FAR + tst r3, #PSR_T_BIT + ldrneh r3, [r2] @ read aborted thumb instruction + ldreq r3, [r2] @ read aborted ARM instruction + bic r1, r1, #1 << 11 @ clear bits 11 of FSR + movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20 + tst r3, #1 << 20 @ check write + orreq r1, r1, #1 << 11 + mov pc, lr --- linux-2.6.0-test5/arch/arm/mm/abort-xscale.S 2003-06-14 12:18:35.000000000 -0700 +++ /dev/null 2002-08-30 16:31:37.000000000 -0700 @@ -1,34 +0,0 @@ -#include -#include -/* - * Function: xscale_abort - * - * Params : r2 = address of aborted instruction - * : r3 = saved SPSR - * - * Returns : r0 = address of abort - * : r1 = FSR, bit 11 = write - * : r2-r8 = corrupted - * : r9 = preserved - * : sp = pointer to registers - * - * Purpose : obtain information about current aborted instruction. - * Note: we read user space. This means we might cause a data - * abort here if the I-TLB and D-TLB aren't seeing the same - * picture. Unfortunately, this does happen. We live with it. - * - * Note: Xscale is contains non-standard architecture extensions. - * It requires its own early abort handler - */ - .align 5 -ENTRY(xscale_abort) - mrc p15, 0, r1, c5, c0, 0 @ get FSR - mrc p15, 0, r0, c6, c0, 0 @ get FAR - tst r3, #PSR_T_BIT - ldrneh r3, [r2] @ read aborted thumb instruction - ldreq r3, [r2] @ read aborted ARM instruction - bic r1, r1, #1 << 11 @ clear bits 11 of FSR - movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20 - tst r3, #1 << 20 @ check write - orreq r1, r1, #1 << 11 - mov pc, lr --- linux-2.6.0-test5/arch/arm/mm/discontig.c 2003-06-14 12:18:31.000000000 -0700 +++ 25/arch/arm/mm/discontig.c 2003-09-18 22:02:45.000000000 -0700 @@ -15,7 +15,7 @@ #include #include -#if NR_NODES != 4 +#if MAX_NUMNODES != 4 #error Fix Me Please #endif @@ -23,9 +23,9 @@ * Our node_data structure for discontiguous memory. */ -static bootmem_data_t node_bootmem_data[NR_NODES]; +static bootmem_data_t node_bootmem_data[MAX_NUMNODES]; -pg_data_t discontig_node_data[NR_NODES] = { +pg_data_t discontig_node_data[MAX_NUMNODES] = { { .bdata = &node_bootmem_data[0] }, { .bdata = &node_bootmem_data[1] }, { .bdata = &node_bootmem_data[2] }, --- linux-2.6.0-test5/arch/arm/mm/fault-armv.c 2003-06-22 12:04:43.000000000 -0700 +++ 25/arch/arm/mm/fault-armv.c 2003-09-18 21:02:00.000000000 -0700 @@ -14,9 +14,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -127,6 +129,8 @@ do_PrefetchAbort(unsigned long addr, str do_translation_fault(addr, 0, regs); } +static unsigned long shared_pte_mask = L_PTE_CACHEABLE; + /* * We take the easy way out of this problem - we make the * PTE uncacheable. However, we leave the write buffer on. @@ -157,9 +161,9 @@ static int adjust_pte(struct vm_area_str * If this page isn't present, or is already setup to * fault (ie, is old), we can safely ignore any issues. */ - if (pte_present(entry) && pte_val(entry) & L_PTE_CACHEABLE) { + if (pte_present(entry) && pte_val(entry) & shared_pte_mask) { flush_cache_page(vma, address); - pte_val(entry) &= ~L_PTE_CACHEABLE; + pte_val(entry) &= ~shared_pte_mask; set_pte(pte, entry); flush_tlb_page(vma, address); ret = 1; @@ -297,3 +301,65 @@ void update_mmu_cache(struct vm_area_str make_coherent(vma, addr, page, dirty); } } + +/* + * Check whether the write buffer has physical address aliasing + * issues. 
If it has, we need to avoid them for the case where + * we have several shared mappings of the same object in user + * space. + */ +static int __init check_writebuffer(unsigned long *p1, unsigned long *p2) +{ + register unsigned long zero = 0, one = 1, val; + + local_irq_disable(); + mb(); + *p1 = one; + mb(); + *p2 = zero; + mb(); + val = *p1; + mb(); + local_irq_enable(); + return val != zero; +} + +void __init check_writebuffer_bugs(void) +{ + struct page *page; + const char *reason; + unsigned long v = 1; + + printk(KERN_INFO "CPU: Testing write buffer coherency: "); + + page = alloc_page(GFP_KERNEL); + if (page) { + unsigned long *p1, *p2; + pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG| + L_PTE_DIRTY|L_PTE_WRITE| + L_PTE_BUFFERABLE); + + p1 = vmap(&page, 1, VM_IOREMAP, prot); + p2 = vmap(&page, 1, VM_IOREMAP, prot); + + if (p1 && p2) { + v = check_writebuffer(p1, p2); + reason = "enabling work-around"; + } else { + reason = "unable to map memory\n"; + } + + vunmap(p1); + vunmap(p2); + put_page(page); + } else { + reason = "unable to grab page\n"; + } + + if (v) { + printk("failed, %s\n", reason); + shared_pte_mask |= L_PTE_BUFFERABLE; + } else { + printk("ok\n"); + } +} --- linux-2.6.0-test5/arch/arm/mm/init.c 2003-07-10 18:50:30.000000000 -0700 +++ 25/arch/arm/mm/init.c 2003-09-18 22:02:45.000000000 -0700 @@ -33,12 +33,6 @@ #include #include -#ifndef CONFIG_DISCONTIGMEM -#define NR_NODES 1 -#else -#define NR_NODES 4 -#endif - #ifdef CONFIG_CPU_32 #define TABLE_OFFSET (PTRS_PER_PTE) #else @@ -178,7 +172,7 @@ find_memend_and_nodes(struct meminfo *mi { unsigned int i, bootmem_pages = 0, memend_pfn = 0; - for (i = 0; i < NR_NODES; i++) { + for (i = 0; i < MAX_NUMNODES; i++) { np[i].start = -1U; np[i].end = 0; np[i].bootmap_pages = 0; @@ -207,7 +201,7 @@ find_memend_and_nodes(struct meminfo *mi * we have, we're in trouble. (maybe we ought to * limit, instead of bugging?) */ - if (numnodes > NR_NODES) + if (numnodes > MAX_NUMNODES) BUG(); } @@ -365,7 +359,7 @@ static inline void free_bootmem_node_ban */ void __init bootmem_init(struct meminfo *mi) { - struct node_info node_info[NR_NODES], *np = node_info; + struct node_info node_info[MAX_NUMNODES], *np = node_info; unsigned int bootmap_pages, bootmap_pfn, map_pg; int node, initrd_node; --- linux-2.6.0-test5/arch/arm/mm/ioremap.c 2003-06-14 12:18:24.000000000 -0700 +++ 25/arch/arm/mm/ioremap.c 2003-09-18 21:38:20.000000000 -0700 @@ -150,7 +150,7 @@ __ioremap(unsigned long phys_addr, size_ if (!area) return NULL; addr = area->addr; - if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) { + if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { vfree(addr); return NULL; } --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/Kconfig 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,344 @@ +comment "Processor Type" + +config CPU_32 + bool + default y + +# Select CPU types depending on the architecture selected. This selects +# which CPUs we support in the kernel image, and the compiler instruction +# optimiser behaviour. + +# ARM610 +config CPU_ARM610 + bool "Support ARM610 processor" + depends on ARCH_RPC + select CPU_32v3 + select CPU_CACHE_V3 + select CPU_COPY_V3 + select CPU_TLB_V3 + help + The ARM610 is the successor to the ARM3 processor + and was produced by VLSI Technology Inc. + + Say Y if you want support for the ARM610 processor. + Otherwise, say N. 
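The check_writebuffer_bugs() code added to fault-armv.c above detects whether two virtual mappings of the same physical page can disagree because of write-buffer behaviour, and widens shared_pte_mask so that adjust_pte() also clears L_PTE_BUFFERABLE when they do. The idea is easier to see in a user-space analogue; the sketch below is an illustration under that assumption, not code from the patch: map one shared file page at two addresses, write through one mapping and read it back through the other, just as check_writebuffer() does with its two vmap() views of a page.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/alias-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
	volatile unsigned long *p1, *p2;

	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("setup");
		return 1;
	}

	/* Two views of the same page, like the kernel's two vmap() mappings. */
	p1 = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	p2 = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p1 == MAP_FAILED || p2 == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	*p1 = 1;	/* write "one" through the first mapping */
	*p2 = 0;	/* then "zero" through the second */

	/* Coherent mappings read back 0; the kernel test enables its
	 * work-around when the first mapping still returns the stale 1. */
	printf("first mapping now reads %lu\n", *p1);

	unlink("/tmp/alias-test");
	return 0;
}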
+ +# ARM710 +config CPU_ARM710 + bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC + default y if ARCH_CLPS7500 + select CPU_32v3 + select CPU_CACHE_V3 + select CPU_COPY_V3 + select CPU_TLB_V3 + help + A 32-bit RISC microprocessor based on the ARM7 processor core + designed by Advanced RISC Machines Ltd. The ARM710 is the + successor to the ARM610 processor. It was released in + July 1994 by VLSI Technology Inc. + + Say Y if you want support for the ARM710 processor. + Otherwise, say N. + +# ARM720T +config CPU_ARM720T + bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR + default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 + select CPU_32v4 + select CPU_ABRT_LV4T + select CPU_CACHE_V4 + select CPU_COPY_V4WT + select CPU_TLB_V4WT + help + A 32-bit RISC processor with 8kByte Cache, Write Buffer and + MMU built around an ARM7TDMI core. + + Say Y if you want support for the ARM720T processor. + Otherwise, say N. + +# ARM920T +config CPU_ARM920T + bool "Support ARM920T processor" + depends on ARCH_INTEGRATOR + select CPU_32v4 + select CPU_ABRT_EV4T + select CPU_CACHE_V4WT + select CPU_COPY_V4WB + select CPU_TLB_V4WBI + help + The ARM920T is licensed to be produced by numerous vendors, + and is used in the Maverick EP9312. More information at + . + + Say Y if you want support for the ARM920T processor. + Otherwise, say N. + +# ARM922T +config CPU_ARM922T + bool + depends on ARCH_CAMELOT + default y + select CPU_32v4 + select CPU_ABRT_EV4T + select CPU_CACHE_V4WT + select CPU_COPY_V4WB + select CPU_TLB_V4WBI + help + The ARM922T is a version of the ARM920T, but with smaller + instruction and data caches. It is used in Altera's + Excalibur XA device family. + + Say Y if you want support for the ARM922T processor. + Otherwise, say N. + +# ARM926T +config CPU_ARM926T + bool "Support ARM926T processor" + depends on ARCH_INTEGRATOR + select CPU_32v5 + select CPU_ABRT_EV5TJ + select CPU_COPY_V4WB + select CPU_TLB_V4WBI + help + This is a variant of the ARM920. It has slightly different + instruction sequences for cache and TLB operations. Curiously, + there is no documentation on it at the ARM corporate website. + + Say Y if you want support for the ARM926T processor. + Otherwise, say N. + +# ARM1020 - needs validating +config CPU_ARM1020 + bool "Support ARM1020T (rev 0) processor" + depends on ARCH_INTEGRATOR + select CPU_32v5 + select CPU_ABRT_EV4T + select CPU_CACHE_V4WT + select CPU_COPY_V4WB + select CPU_TLB_V4WBI + help + The ARM1020 is the 32K cached version of the ARM10 processor, + with an addition of a floating-point unit. + + Say Y if you want support for the ARM1020 processor. + Otherwise, say N. + +# ARM1020E - needs validating +config CPU_ARM1020E + bool "Support ARM1020E processor" + depends on ARCH_INTEGRATOR + select CPU_32v5 + select CPU_ABRT_EV4T + select CPU_CACHE_V4WT + select CPU_COPY_V4WB + select CPU_TLB_V4WBI + depends on n + +# ARM1022E +config CPU_ARM1022 + bool "Support ARM1022E processor" + depends on ARCH_INTEGRATOR + select CPU_32v5 + select CPU_ABRT_EV4T + select CPU_COPY_V4WB # can probably do better + select CPU_TLB_V4WBI + help + The ARM1022E is an implementation of the ARMv5TE architecture + based upon the ARM10 integer core with a 16KiB L1 Harvard cache, + embedded trace macrocell, and a floating-point unit. + + Say Y if you want support for the ARM1022E processor. + Otherwise, say N. 
+ +# ARM1026EJ-S +config CPU_ARM1026 + bool "Support ARM1026EJ-S processor" + depends on ARCH_INTEGRATOR + select CPU_32v5 + select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 + select CPU_COPY_V4WB # can probably do better + select CPU_TLB_V4WBI + help + The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture + based upon the ARM10 integer core. + + Say Y if you want support for the ARM1026EJ-S processor. + Otherwise, say N. + +# SA110 +config CPU_SA110 + bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && !ARCH_ANAKIN && ARCH_RPC + default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_ANAKIN + select CPU_32v3 if ARCH_RPC + select CPU_32v4 if !ARCH_RPC + select CPU_ABRT_EV4 + select CPU_CACHE_V4WB + select CPU_COPY_V4WB + select CPU_TLB_V4WB + help + The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and + is available at five speeds ranging from 100 MHz to 233 MHz. + More information is available at + . + + Say Y if you want support for the SA-110 processor. + Otherwise, say N. + +# SA1100 +config CPU_SA1100 + bool + depends on ARCH_SA1100 + default y + select CPU_32v4 + select CPU_ABRT_EV4 + select CPU_CACHE_V4WB + select CPU_TLB_V4WB + select CPU_MINICACHE + +# XScale +config CPU_XSCALE + bool + depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA + default y + select CPU_32v5 + select CPU_ABRT_EV5T + select CPU_TLB_V4WBI + select CPU_MINICACHE + +# This defines the compiler instruction set which depends on the machine type. +config CPU_32v3 + bool + +config CPU_32v4 + bool + +config CPU_32v5 + bool + +# The abort model +config CPU_ABRT_EV4 + bool + +config CPU_ABRT_EV4T + bool + +config CPU_ABRT_LV4T + bool + +config CPU_ABRT_EV5T + bool + +config CPU_ABRT_EV5TJ + bool + +# The cache model +config CPU_CACHE_V3 + bool + +config CPU_CACHE_V4 + bool + +config CPU_CACHE_V4WT + bool + +config CPU_CACHE_V4WB + bool + +# The copy-page model +config CPU_COPY_V3 + bool + +config CPU_COPY_V4WT + bool + +config CPU_COPY_V4WB + bool + +# This selects the TLB model +config CPU_TLB_V3 + bool + help + ARM Architecture Version 3 TLB. + +config CPU_TLB_V4WT + bool + help + ARM Architecture Version 4 TLB with writethrough cache. + +config CPU_TLB_V4WB + bool + help + ARM Architecture Version 4 TLB with writeback cache. + +config CPU_TLB_V4WBI + bool + help + ARM Architecture Version 4 TLB with writeback cache and invalidate + instruction cache entry. + +config CPU_TLB_V6 + bool + +config CPU_MINICACHE + bool + help + Processor has a minicache. + +comment "Processor Features" + +config ARM_THUMB + bool "Support Thumb user binaries" + depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE + default y + help + Say Y if you want to have kernel support for ARM Thumb instructions, + fault handlers, and system calls. + + The Thumb instruction set is a compressed form of the standard ARM + instruction set resulting in smaller binaries at the expense of + slightly less efficient code. + + If you don't know what this all is, saying Y is a safe choice. + +config CPU_BIG_ENDIAN + bool "Build big-endian kernel" + depends on ARCH_SUPPORTS_BIG_ENDIAN + help + Say Y if you plan on running a kernel in big-endian mode. + Note that your board must be properly built and your board + port must properly enable and big-endian related features + of your chipset/board/processor. 
+ +config CPU_ICACHE_DISABLE + bool "Disable I-Cache" + depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 + help + Say Y here to disable the processor instruction cache. Unless + you have a reason not to or are unsure, say N. + +config CPU_DCACHE_DISABLE + bool "Disable D-Cache" + depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 + help + Say Y here to disable the processor data cache. Unless + you have a reason not to or are unsure, say N. + +config CPU_DCACHE_WRITETHROUGH + bool "Force write through D-cache" + depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020) && !CPU_DISABLE_DCACHE + help + Say Y here to use the data cache in writethough mode. Unless you + specifically require this or are unsure, say N. + +config CPU_CACHE_ROUND_ROBIN + bool "Round robin I and D cache replacement algorithm" + depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE) + help + Say Y here to use the predictable round-robin cache replacement + policy. Unless you specifically require this or are unsure, say N. + +config CPU_BPREDICT_DISABLE + bool "Disable branch prediction" + depends on CPU_ARM1020 + help + Say Y here to disable branch prediction. If unsure, say N. --- linux-2.6.0-test5/arch/arm/mm/Makefile 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/Makefile 2003-09-18 21:02:00.000000000 -0700 @@ -2,29 +2,48 @@ # Makefile for the linux arm-specific parts of the memory manager. # -# Object file lists. +obj-y := consistent.o extable.o fault-armv.o \ + fault-common.o init.o ioremap.o mm-armv.o -obj-y := consistent.o extable.o fault-armv.o fault-common.o \ - init.o ioremap.o mm-armv.o -obj-$(CONFIG_MODULES) += proc-syms.o - -obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o -obj-$(CONFIG_DISCONTIGMEM) += discontig.o - -# ARMv3 -p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o -p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o - -# ARMv4 -p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o -p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o -p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o -p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o -p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o -p-$(CONFIG_CPU_SA1100) += proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o - -# ARMv5 -p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o -p-$(CONFIG_CPU_XSCALE) += proc-xscale.o tlb-v4wbi.o copypage-xscale.o abort-xscale.o minicache.o +obj-$(CONFIG_MODULES) += proc-syms.o -obj-y += $(sort $(p-y)) +obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o +obj-$(CONFIG_DISCONTIGMEM) += discontig.o + +obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o +obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o +obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o +obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o +obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o + +obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o +obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o +obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o +obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o + +obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o +obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o +obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o +obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o +obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o + 
+obj-$(CONFIG_CPU_MINICACHE) += minicache.o + +obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o +obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o +obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o +obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o + +obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o +obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o +obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o +obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o +obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o +obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o +obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o +obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o +obj-$(CONFIG_CPU_ARM1022) += proc-arm1022.o +obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o +obj-$(CONFIG_CPU_SA110) += proc-sa110.o +obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o +obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o --- linux-2.6.0-test5/arch/arm/mm/mm-armv.c 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/mm-armv.c 2003-09-18 21:02:00.000000000 -0700 @@ -392,12 +392,19 @@ static void __init create_mapping(struct long off; if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) { - printk(KERN_WARNING "MM: not creating mapping for " + printk(KERN_WARNING "BUG: not creating mapping for " "0x%08lx at 0x%08lx in user region\n", md->physical, md->virtual); return; } + if (md->type == MT_DEVICE && + md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { + printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx " + "overlaps vmalloc space\n", + md->physical, md->virtual); + } + domain = mem_types[md->type].domain; prot_pte = __pgprot(mem_types[md->type].prot_pte); prot_l1 = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain); @@ -409,7 +416,7 @@ static void __init create_mapping(struct if (mem_types[md->type].prot_l1 == 0 && (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) { - printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not " + printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " "be mapped using pages, ignoring.\n", md->physical, md->virtual); return; --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/proc-arm1020e.S 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,500 @@ +/* + * linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020 + * + * Copyright (C) 2000 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * These are the low level assembler for performing cache and TLB + * functions on the arm1020e. + * + * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is the maximum size of an area which will be invalidated + * using the single invalidate entry instructions. Anything larger + * than this, and we go for the whole cache. + * + * This value should be chosen such that we choose the cheapest + * alternative. 
+ */ +#define MAX_AREA_SIZE 32768 + +/* + * The size of one data cache line. + */ +#define CACHE_DLINESIZE 32 + +/* + * The number of data cache segments. + */ +#define CACHE_DSEGMENTS 16 + +/* + * The number of lines in a cache segment. + */ +#define CACHE_DENTRIES 64 + +/* + * This is the size at which it becomes more efficient to + * clean the whole cache, rather than using the individual + * cache line maintainence instructions. + */ +#define CACHE_DLIMIT 32768 + + .text +/* + * cpu_arm1020e_proc_init() + */ +ENTRY(cpu_arm1020e_proc_init) + mov pc, lr + +/* + * cpu_arm1020e_proc_fin() + */ +ENTRY(cpu_arm1020e_proc_fin) + stmfd sp!, {lr} + mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE + msr cpsr_c, ip + bl arm1020e_flush_kern_cache_all + mrc p15, 0, r0, c1, c0, 0 @ ctrl register + bic r0, r0, #0x1000 @ ...i............ + bic r0, r0, #0x000e @ ............wca. + mcr p15, 0, r0, c1, c0, 0 @ disable caches + ldmfd sp!, {pc} + +/* + * cpu_arm1020e_reset(loc) + * + * Perform a soft reset of the system. Put the CPU into the + * same state as it would be if it had been reset, and branch + * to what would be the reset vector. + * + * loc: location to jump to for soft reset + */ + .align 5 +ENTRY(cpu_arm1020e_reset) + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mrc p15, 0, ip, c1, c0, 0 @ ctrl register + bic ip, ip, #0x000f @ ............wcam + bic ip, ip, #0x1100 @ ...i...s........ + mcr p15, 0, ip, c1, c0, 0 @ ctrl register + mov pc, r0 + +/* + * cpu_arm1020e_do_idle() + */ + .align 5 +ENTRY(cpu_arm1020e_do_idle) + mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt + mov pc, lr + +/* ================================= CACHE ================================ */ + + .align 5 +/* + * flush_user_cache_all() + * + * Invalidate all cache entries in a particular address + * space. + */ +ENTRY(arm1020e_flush_user_cache_all) + /* FALLTHROUGH */ +/* + * flush_kern_cache_all() + * + * Clean and invalidate the entire cache. + */ +ENTRY(arm1020e_flush_kern_cache_all) + mov r2, #VM_EXEC + mov ip, #0 +__flush_whole_cache: +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments +1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries +2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index + subs r3, r3, #1 << 26 + bcs 2b @ entries 63 to 0 + subs r1, r1, #1 << 5 + bcs 1b @ segments 15 to 0 +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_user_cache_range(start, end, flags) + * + * Invalidate a range of cache entries in the specified + * address space. + * + * - start - start address (inclusive) + * - end - end address (exclusive) + * - flags - vm_flags for this space + */ +ENTRY(arm1020e_flush_user_cache_range) + mov ip, #0 + sub r3, r1, r0 @ calculate total size + cmp r3, #CACHE_DLIMIT + bhs __flush_whole_cache + +#ifndef CONFIG_CPU_DCACHE_DISABLE +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. 
If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1020e_coherent_kern_range) + mov ip, #0 + bic r0, r0, #CACHE_DLINESIZE - 1 +1: +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry +#endif + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_kern_dcache_page(void *page) + * + * Ensure no D cache aliasing occurs, either with itself or + * the I cache + * + * - page - page aligned address + */ +ENTRY(arm1020e_flush_kern_dcache_page) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + add r1, r0, #PAGE_SZ +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1020e_dma_inv_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + tst r0, #CACHE_DLINESIZE - 1 + bic r0, r0, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r0, c7, c10, 1 @ clean D entry + tst r1, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r1, c7, c10, 1 @ clean D entry +1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_clean_range(start, end) + * + * Clean the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1020e_dma_clean_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1020e_dma_flush_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +ENTRY(arm1020e_cache_fns) + .long arm1020e_flush_kern_cache_all + .long arm1020e_flush_user_cache_all + .long arm1020e_flush_user_cache_range + .long arm1020e_coherent_kern_range + .long arm1020e_flush_kern_dcache_page + .long arm1020e_dma_inv_range + .long arm1020e_dma_clean_range + .long arm1020e_dma_flush_range + + .align 5 +ENTRY(cpu_arm1020e_dcache_clean_area) +#ifndef CONFIG_CPU_DCACHE_DISABLE + mov ip, #0 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + subs r1, r1, #CACHE_DLINESIZE + bhi 1b +#endif + mov pc, lr + +/* =============================== PageTable ============================== */ + +/* + * cpu_arm1020e_switch_mm(pgd) + * + * Set the translation base pointer to be as described by pgd. 
+ * + * pgd: new page tables + */ + .align 5 +ENTRY(cpu_arm1020e_switch_mm) +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r3, c7, c10, 4 + mov r1, #0xF @ 16 segments +1: mov r3, #0x3F @ 64 entries +2: mov ip, r3, LSL #26 @ shift up entry + orr ip, ip, r1, LSL #5 @ shift in/up index + mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry + mov ip, #0 + subs r3, r3, #1 + cmp r3, #0 + bge 2b @ entries 3F to 0 + subs r1, r1, #1 + cmp r1, #0 + bge 1b @ segments 15 to 0 + +#endif + mov r1, #0 +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache +#endif + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mcr p15, 0, r0, c2, c0, 0 @ load page table pointer + mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs + mov pc, lr + +/* + * cpu_arm1020e_set_pte(ptep, pte) + * + * Set a PTE and flush it out + */ + .align 5 +ENTRY(cpu_arm1020e_set_pte) + str r1, [r0], #-2048 @ linux version + + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY + + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL + + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW + + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW + + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? + movne r2, #0 + +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + eor r3, r1, #0x0a @ C & small page? + tst r3, #0x0b + biceq r2, r2, #4 +#endif + str r2, [r0] @ hardware version + mov r0, r0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif + mov pc, lr + + +ENTRY(cpu_arm1020e_name) + .ascii "ARM1020E" +#ifndef CONFIG_CPU_ICACHE_DISABLE + .ascii "i" +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + .ascii "d" +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + .ascii "(wt)" +#else + .ascii "(wb)" +#endif +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + .ascii "B" +#endif +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + .ascii "RR" +#endif + .ascii "\0" + .align + + __INIT + +__arm1020e_setup: + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 + mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 + mcr p15, 0, r4, c2, c0 @ load page table pointer + mov r0, #0x1f @ Domains 0, 1 = client + mcr p15, 0, r0, c3, c0 @ load domain access register + mrc p15, 0, r0, c1, c0 @ get control register v4 +/* + * Clear out 'unwanted' bits (then put them in if we need them) + */ + bic r0, r0, #0x1e00 @ i...??r......... + bic r0, r0, #0x000e @ ............wca. +/* + * Turn on what we want + */ + orr r0, r0, #0x0031 @ ..........DP...M + orr r0, r0, #0x0100 @ .......S........ + +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + orr r0, r0, #0x4000 @ .R.............. +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + orr r0, r0, #0x0800 @ ....Z........... +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + orr r0, r0, #0x0004 @ Enable D cache +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + orr r0, r0, #0x1000 @ I Cache on +#endif + mov pc, lr + + .text + +/* + * Purpose : Function pointers used to access above functions - all calls + * come through these + */ + .type arm1020e_processor_functions, #object +arm1020e_processor_functions: + .word v4t_early_abort + .word cpu_arm1020e_proc_init + .word cpu_arm1020e_proc_fin + .word cpu_arm1020e_reset + .word cpu_arm1020e_do_idle + .word cpu_arm1020e_dcache_clean_area + .word cpu_arm1020e_switch_mm + .word cpu_arm1020e_set_pte + + .size arm1020e_processor_functions, . 
- arm1020e_processor_functions + + .type cpu_arch_name, #object +cpu_arch_name: + .asciz "armv5te" + .size cpu_arch_name, . - cpu_arch_name + + .type cpu_elf_name, #object +cpu_elf_name: + .asciz "v5" + .size cpu_elf_name, . - cpu_elf_name + .align + + .section ".proc.info", #alloc, #execinstr + + .type __arm1020e_proc_info,#object +__arm1020e_proc_info: + .long 0x4105a200 @ ARM 1020TE (Architecture v5TE) + .long 0xff0ffff0 + .long 0x00000c12 @ mmuflags + b __arm1020e_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB + .long cpu_arm1020e_name + .long arm1020e_processor_functions + .long v4wbi_tlb_fns + .long v4wb_user_fns + .long arm1020e_cache_fns + .size __arm1020e_proc_info, . - __arm1020e_proc_info --- linux-2.6.0-test5/arch/arm/mm/proc-arm1020.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm1020.S 2003-09-18 21:02:00.000000000 -0700 @@ -1,5 +1,5 @@ /* - * linux/arch/arm/mm/arm1020.S: MMU functions for ARM1020 + * linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020 * * Copyright (C) 2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -379,19 +380,19 @@ ENTRY(cpu_arm1020_switch_mm) ENTRY(cpu_arm1020_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER @ User? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young? + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? movne r2, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -410,7 +411,7 @@ ENTRY(cpu_arm1020_set_pte) ENTRY(cpu_arm1020_name) - .ascii "Arm1020" + .ascii "ARM1020" #ifndef CONFIG_CPU_ICACHE_DISABLE .ascii "i" #endif @@ -445,10 +446,8 @@ __arm1020_setup: /* * Clear out 'unwanted' bits (then put them in if we need them) */ - bic r0, r0, #0x0e00 @ ....??r......... - bic r0, r0, #0x0002 @ ..............a. - bic r0, r0, #0x000c @ W,D - bic r0, r0, #0x1000 @ I + bic r0, r0, #0x1e00 @ i...??r......... + bic r0, r0, #0x000e @ ............wca. /* * Turn on what we want */ @@ -490,12 +489,12 @@ arm1020_processor_functions: .type cpu_arch_name, #object cpu_arch_name: - .asciz "armv4t" + .asciz "armv5t" .size cpu_arch_name, . - cpu_arch_name .type cpu_elf_name, #object cpu_elf_name: - .asciz "v4" + .asciz "v5" .size cpu_elf_name, . - cpu_elf_name .align @@ -503,8 +502,8 @@ cpu_elf_name: .type __arm1020_proc_info,#object __arm1020_proc_info: - .long 0x4100a200 - .long 0xff00fff0 + .long 0x4104a200 @ ARM 1020T (Architecture v5T) + .long 0xff0ffff0 .long 0x00000c02 @ mmuflags b __arm1020_setup .long cpu_arch_name --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/proc-arm1022.S 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,480 @@ +/* + * linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E + * + * Copyright (C) 2000 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * These are the low level assembler for performing cache and TLB + * functions on the ARM1022E. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is the maximum size of an area which will be invalidated + * using the single invalidate entry instructions. Anything larger + * than this, and we go for the whole cache. + * + * This value should be chosen such that we choose the cheapest + * alternative. + */ +#define MAX_AREA_SIZE 32768 + +/* + * The size of one data cache line. + */ +#define CACHE_DLINESIZE 32 + +/* + * The number of data cache segments. + */ +#define CACHE_DSEGMENTS 16 + +/* + * The number of lines in a cache segment. + */ +#define CACHE_DENTRIES 64 + +/* + * This is the size at which it becomes more efficient to + * clean the whole cache, rather than using the individual + * cache line maintainence instructions. + */ +#define CACHE_DLIMIT 32768 + + .text +/* + * cpu_arm1022_proc_init() + */ +ENTRY(cpu_arm1022_proc_init) + mov pc, lr + +/* + * cpu_arm1022_proc_fin() + */ +ENTRY(cpu_arm1022_proc_fin) + stmfd sp!, {lr} + mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE + msr cpsr_c, ip + bl arm1022_flush_kern_cache_all + mrc p15, 0, r0, c1, c0, 0 @ ctrl register + bic r0, r0, #0x1000 @ ...i............ + bic r0, r0, #0x000e @ ............wca. + mcr p15, 0, r0, c1, c0, 0 @ disable caches + ldmfd sp!, {pc} + +/* + * cpu_arm1022_reset(loc) + * + * Perform a soft reset of the system. Put the CPU into the + * same state as it would be if it had been reset, and branch + * to what would be the reset vector. + * + * loc: location to jump to for soft reset + */ + .align 5 +ENTRY(cpu_arm1022_reset) + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mrc p15, 0, ip, c1, c0, 0 @ ctrl register + bic ip, ip, #0x000f @ ............wcam + bic ip, ip, #0x1100 @ ...i...s........ + mcr p15, 0, ip, c1, c0, 0 @ ctrl register + mov pc, r0 + +/* + * cpu_arm1022_do_idle() + */ + .align 5 +ENTRY(cpu_arm1022_do_idle) + mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt + mov pc, lr + +/* ================================= CACHE ================================ */ + + .align 5 +/* + * flush_user_cache_all() + * + * Invalidate all cache entries in a particular address + * space. + */ +ENTRY(arm1022_flush_user_cache_all) + /* FALLTHROUGH */ +/* + * flush_kern_cache_all() + * + * Clean and invalidate the entire cache. + */ +ENTRY(arm1022_flush_kern_cache_all) + mov r2, #VM_EXEC + mov ip, #0 +__flush_whole_cache: +#ifndef CONFIG_CPU_DCACHE_DISABLE + mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments +1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries +2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index + subs r3, r3, #1 << 26 + bcs 2b @ entries 63 to 0 + subs r1, r1, #1 << 5 + bcs 1b @ segments 15 to 0 +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_user_cache_range(start, end, flags) + * + * Invalidate a range of cache entries in the specified + * address space. 
+ * + * - start - start address (inclusive) + * - end - end address (exclusive) + * - flags - vm_flags for this space + */ +ENTRY(arm1022_flush_user_cache_range) + mov ip, #0 + sub r3, r1, r0 @ calculate total size + cmp r3, #CACHE_DLIMIT + bhs __flush_whole_cache + +#ifndef CONFIG_CPU_DCACHE_DISABLE +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1022_coherent_kern_range) + mov ip, #0 + bic r0, r0, #CACHE_DLINESIZE - 1 +1: +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry +#endif + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_kern_dcache_page(void *page) + * + * Ensure no D cache aliasing occurs, either with itself or + * the I cache + * + * - page - page aligned address + */ +ENTRY(arm1022_flush_kern_dcache_page) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + add r1, r0, #PAGE_SZ +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1022_dma_inv_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + tst r0, #CACHE_DLINESIZE - 1 + bic r0, r0, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r0, c7, c10, 1 @ clean D entry + tst r1, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r1, c7, c10, 1 @ clean D entry +1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_clean_range(start, end) + * + * Clean the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1022_dma_clean_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1022_dma_flush_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +ENTRY(arm1022_cache_fns) + .long arm1022_flush_kern_cache_all + .long arm1022_flush_user_cache_all + .long arm1022_flush_user_cache_range + .long arm1022_coherent_kern_range + .long arm1022_flush_kern_dcache_page + .long arm1022_dma_inv_range + .long arm1022_dma_clean_range + .long arm1022_dma_flush_range + + .align 5 +ENTRY(cpu_arm1022_dcache_clean_area) +#ifndef CONFIG_CPU_DCACHE_DISABLE + mov ip, #0 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + subs r1, r1, #CACHE_DLINESIZE + bhi 1b +#endif + mov pc, lr + +/* =============================== PageTable ============================== */ + +/* + * cpu_arm1022_switch_mm(pgd) + * + * Set the translation base pointer to be as described by pgd. + * + * pgd: new page tables + */ + .align 5 +ENTRY(cpu_arm1022_switch_mm) +#ifndef CONFIG_CPU_DCACHE_DISABLE + mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments +1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries +2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index + subs r3, r3, #1 << 26 + bcs 2b @ entries 63 to 0 + subs r1, r1, #1 << 5 + bcs 1b @ segments 15 to 0 +#endif + mov r1, #0 +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache +#endif + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mcr p15, 0, r0, c2, c0, 0 @ load page table pointer + mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs + mov pc, lr + +/* + * cpu_arm1022_set_pte(ptep, pte) + * + * Set a PTE and flush it out + */ + .align 5 +ENTRY(cpu_arm1022_set_pte) + str r1, [r0], #-2048 @ linux version + + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY + + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL + + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW + + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW + + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? + movne r2, #0 + +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + eor r3, r1, #0x0a @ C & small page? + tst r3, #0x0b + biceq r2, r2, #4 +#endif + str r2, [r0] @ hardware version + mov r0, r0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif + mov pc, lr + + +ENTRY(cpu_arm1022_name) + .ascii "arm1022" +#ifndef CONFIG_CPU_ICACHE_DISABLE + .ascii "i" +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + .ascii "d" +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + .ascii "(wt)" +#else + .ascii "(wb)" +#endif +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + .ascii "B" +#endif +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + .ascii "RR" +#endif + .ascii "\0" + .align + + __INIT + +__arm1022_setup: + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 + mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 + mcr p15, 0, r4, c2, c0 @ load page table pointer + mov r0, #0x1f @ Domains 0, 1 = client + mcr p15, 0, r0, c3, c0 @ load domain access register + mrc p15, 0, r0, c1, c0 @ get control register v4 +/* + * Clear out 'unwanted' bits (then put them in if we need them) + */ + bic r0, r0, #0x1e00 @ ...i??r......... + bic r0, r0, #0x000e @ ............wca. 
+/* + * Turn on what we want + */ + orr r0, r0, #0x0031 @ ..........DP...M + orr r0, r0, #0x2100 @ ..V....S........ + +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + orr r0, r0, #0x4000 @ .R.............. +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + orr r0, r0, #0x0800 @ ....Z........... +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + orr r0, r0, #0x0004 @ .............C.. +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + orr r0, r0, #0x1000 @ ...I............ +#endif + mov pc, lr + + .text + +/* + * Purpose : Function pointers used to access above functions - all calls + * come through these + */ + .type arm1022_processor_functions, #object +arm1022_processor_functions: + .word v4t_early_abort + .word cpu_arm1022_proc_init + .word cpu_arm1022_proc_fin + .word cpu_arm1022_reset + .word cpu_arm1022_do_idle + .word cpu_arm1022_dcache_clean_area + .word cpu_arm1022_switch_mm + .word cpu_arm1022_set_pte + + .size arm1022_processor_functions, . - arm1022_processor_functions + + .type cpu_arch_name, #object +cpu_arch_name: + .asciz "armv5te" + .size cpu_arch_name, . - cpu_arch_name + + .type cpu_elf_name, #object +cpu_elf_name: + .asciz "v5" + .size cpu_elf_name, . - cpu_elf_name + .align + + .section ".proc.info", #alloc, #execinstr + + .type __arm1022_proc_info,#object +__arm1022_proc_info: + .long 0x4105a220 @ ARM 1022E (v5TE) + .long 0xff0ffff0 + .long 0x00000c12 @ mmuflags + b __arm1022_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB + .long cpu_arm1022_name + .long arm1022_processor_functions + .long v4wbi_tlb_fns + .long v4wb_user_fns + .long arm1022_cache_fns + .size __arm1022_proc_info, . - __arm1022_proc_info --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25/arch/arm/mm/proc-arm1026.S 2003-09-18 21:02:00.000000000 -0700 @@ -0,0 +1,476 @@ +/* + * linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S + * + * Copyright (C) 2000 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * These are the low level assembler for performing cache and TLB + * functions on the ARM1026EJ-S. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This is the maximum size of an area which will be invalidated + * using the single invalidate entry instructions. Anything larger + * than this, and we go for the whole cache. + * + * This value should be chosen such that we choose the cheapest + * alternative. + */ +#define MAX_AREA_SIZE 32768 + +/* + * The size of one data cache line. + */ +#define CACHE_DLINESIZE 32 + +/* + * The number of data cache segments. + */ +#define CACHE_DSEGMENTS 16 + +/* + * The number of lines in a cache segment. + */ +#define CACHE_DENTRIES 64 + +/* + * This is the size at which it becomes more efficient to + * clean the whole cache, rather than using the individual + * cache line maintainence instructions. + */ +#define CACHE_DLIMIT 32768 + + .text +/* + * cpu_arm1026_proc_init() + */ +ENTRY(cpu_arm1026_proc_init) + mov pc, lr + +/* + * cpu_arm1026_proc_fin() + */ +ENTRY(cpu_arm1026_proc_fin) + stmfd sp!, {lr} + mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE + msr cpsr_c, ip + bl arm1026_flush_kern_cache_all + mrc p15, 0, r0, c1, c0, 0 @ ctrl register + bic r0, r0, #0x1000 @ ...i............ 
+ bic r0, r0, #0x000e @ ............wca. + mcr p15, 0, r0, c1, c0, 0 @ disable caches + ldmfd sp!, {pc} + +/* + * cpu_arm1026_reset(loc) + * + * Perform a soft reset of the system. Put the CPU into the + * same state as it would be if it had been reset, and branch + * to what would be the reset vector. + * + * loc: location to jump to for soft reset + */ + .align 5 +ENTRY(cpu_arm1026_reset) + mov ip, #0 + mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs + mrc p15, 0, ip, c1, c0, 0 @ ctrl register + bic ip, ip, #0x000f @ ............wcam + bic ip, ip, #0x1100 @ ...i...s........ + mcr p15, 0, ip, c1, c0, 0 @ ctrl register + mov pc, r0 + +/* + * cpu_arm1026_do_idle() + */ + .align 5 +ENTRY(cpu_arm1026_do_idle) + mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt + mov pc, lr + +/* ================================= CACHE ================================ */ + + .align 5 +/* + * flush_user_cache_all() + * + * Invalidate all cache entries in a particular address + * space. + */ +ENTRY(arm1026_flush_user_cache_all) + /* FALLTHROUGH */ +/* + * flush_kern_cache_all() + * + * Clean and invalidate the entire cache. + */ +ENTRY(arm1026_flush_kern_cache_all) + mov r2, #VM_EXEC + mov ip, #0 +__flush_whole_cache: +#ifndef CONFIG_CPU_DCACHE_DISABLE +1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate + bne 1b +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_user_cache_range(start, end, flags) + * + * Invalidate a range of cache entries in the specified + * address space. + * + * - start - start address (inclusive) + * - end - end address (exclusive) + * - flags - vm_flags for this space + */ +ENTRY(arm1026_flush_user_cache_range) + mov ip, #0 + sub r3, r1, r0 @ calculate total size + cmp r3, #CACHE_DLIMIT + bhs __flush_whole_cache + +#ifndef CONFIG_CPU_DCACHE_DISABLE +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + tst r2, #VM_EXEC +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache +#endif + mcrne p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * coherent_kern_range(start, end) + * + * Ensure coherency between the Icache and the Dcache in the + * region described by start. If you have non-snooping + * Harvard caches, you need to implement this function. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1026_coherent_kern_range) + mov ip, #0 + bic r0, r0, #CACHE_DLINESIZE - 1 +1: +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry +#endif + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * flush_kern_dcache_page(void *page) + * + * Ensure no D cache aliasing occurs, either with itself or + * the I cache + * + * - page - page aligned address + */ +ENTRY(arm1026_flush_kern_dcache_page) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + add r1, r0, #PAGE_SZ +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. 
+ * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1026_dma_inv_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + tst r0, #CACHE_DLINESIZE - 1 + bic r0, r0, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r0, c7, c10, 1 @ clean D entry + tst r1, #CACHE_DLINESIZE - 1 + mcrne p15, 0, r1, c7, c10, 1 @ clean D entry +1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_clean_range(start, end) + * + * Clean the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + * + * (same as v4wb) + */ +ENTRY(arm1026_dma_clean_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +/* + * dma_flush_range(start, end) + * + * Clean and invalidate the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(arm1026_dma_flush_range) + mov ip, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + bic r0, r0, #CACHE_DLINESIZE - 1 +1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry + add r0, r0, #CACHE_DLINESIZE + cmp r0, r1 + blo 1b +#endif + mcr p15, 0, ip, c7, c10, 4 @ drain WB + mov pc, lr + +ENTRY(arm1026_cache_fns) + .long arm1026_flush_kern_cache_all + .long arm1026_flush_user_cache_all + .long arm1026_flush_user_cache_range + .long arm1026_coherent_kern_range + .long arm1026_flush_kern_dcache_page + .long arm1026_dma_inv_range + .long arm1026_dma_clean_range + .long arm1026_dma_flush_range + + .align 5 +ENTRY(cpu_arm1026_dcache_clean_area) +#ifndef CONFIG_CPU_DCACHE_DISABLE + mov ip, #0 +1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry + add r0, r0, #CACHE_DLINESIZE + subs r1, r1, #CACHE_DLINESIZE + bhi 1b +#endif + mov pc, lr + +/* =============================== PageTable ============================== */ + +/* + * cpu_arm1026_switch_mm(pgd) + * + * Set the translation base pointer to be as described by pgd. + * + * pgd: new page tables + */ + .align 5 +ENTRY(cpu_arm1026_switch_mm) + mov r1, #0 +#ifndef CONFIG_CPU_DCACHE_DISABLE +1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate + bne 1b +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache +#endif + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mcr p15, 0, r0, c2, c0, 0 @ load page table pointer + mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs + mov pc, lr + +/* + * cpu_arm1026_set_pte(ptep, pte) + * + * Set a PTE and flush it out + */ + .align 5 +ENTRY(cpu_arm1026_set_pte) + str r1, [r0], #-2048 @ linux version + + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY + + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL + + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW + + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW + + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? + movne r2, #0 + +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + eor r3, r1, #0x0a @ C & small page? 
+ tst r3, #0x0b + biceq r2, r2, #4 +#endif + str r2, [r0] @ hardware version + mov r0, r0 +#ifndef CONFIG_CPU_DCACHE_DISABLE + mcr p15, 0, r0, c7, c10, 1 @ clean D entry +#endif + mov pc, lr + + + __INIT + +__arm1026_setup: + mov r0, #0 + mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 + mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 + mcr p15, 0, r4, c2, c0 @ load page table pointer +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + mov r0, #4 @ explicitly disable writeback + mcr p15, 7, r0, c15, c0, 0 +#endif + mov r0, #0x1f @ Domains 0, 1 = client + mcr p15, 0, r0, c3, c0 @ load domain access register + mrc p15, 0, r0, c1, c0 @ get control register v4 +/* + * Clear out 'unwanted' bits (then put them in if we need them) + */ + bic r0, r0, #0x1e00 @ ...i??r......... + bic r0, r0, #0x000e @ ............wca. +/* + * Turn on what we want + */ + orr r0, r0, #0x0031 @ ..........DP...M + orr r0, r0, #0x2100 @ ..V....S........ + +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + orr r0, r0, #0x4000 @ .R.............. +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + orr r0, r0, #0x0800 @ ....Z........... +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + orr r0, r0, #0x0004 @ .............C.. +#endif +#ifndef CONFIG_CPU_ICACHE_DISABLE + orr r0, r0, #0x1000 @ ...I............ +#endif + mov pc, lr + +/* + * Purpose : Function pointers used to access above functions - all calls + * come through these + */ + .type arm1026_processor_functions, #object +arm1026_processor_functions: + .word ev5t_early_abort + .word cpu_arm1026_proc_init + .word cpu_arm1026_proc_fin + .word cpu_arm1026_reset + .word cpu_arm1026_do_idle + .word cpu_arm1026_dcache_clean_area + .word cpu_arm1026_switch_mm + .word cpu_arm1026_set_pte + + .size arm1026_processor_functions, . - arm1026_processor_functions + + .section .rodata + + .type cpu_arch_name, #object +cpu_arch_name: + .asciz "armv5tej" + .size cpu_arch_name, . - cpu_arch_name + + .type cpu_elf_name, #object +cpu_elf_name: + .asciz "v5" + .size cpu_elf_name, . - cpu_elf_name + .align + + .type cpu_arm1026_name, #object +ENTRY(cpu_arm1026_name) + .ascii "ARM1026EJ-S" +#ifndef CONFIG_CPU_ICACHE_DISABLE + .ascii "i" +#endif +#ifndef CONFIG_CPU_DCACHE_DISABLE + .ascii "d" +#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH + .ascii "(wt)" +#else + .ascii "(wb)" +#endif +#endif +#ifndef CONFIG_CPU_BPREDICT_DISABLE + .ascii "B" +#endif +#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN + .ascii "RR" +#endif + .ascii "\0" + .align + .size cpu_arm1026_name, . - cpu_arm1026_name + + .section ".proc.info", #alloc, #execinstr + + .type __arm1026_proc_info,#object +__arm1026_proc_info: + .long 0x4106a260 @ ARM 1026EJ-S (v5TEJ) + .long 0xff0ffff0 + .long 0x00000c12 @ mmuflags + b __arm1026_setup + .long cpu_arch_name + .long cpu_elf_name + .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT + .long cpu_arm1026_name + .long arm1026_processor_functions + .long v4wbi_tlb_fns + .long v4wb_user_fns + .long arm1026_cache_fns + .size __arm1026_proc_info, . 
- __arm1026_proc_info --- linux-2.6.0-test5/arch/arm/mm/proc-arm6_7.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm6_7.S 2003-09-18 21:02:00.000000000 -0700 @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -214,19 +215,19 @@ ENTRY(cpu_arm6_set_pte) ENTRY(cpu_arm7_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young movne r2, #0 str r2, [r0] @ hardware version --- linux-2.6.0-test5/arch/arm/mm/proc-arm720.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm720.S 2003-09-18 21:02:00.000000000 -0700 @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -90,19 +91,19 @@ ENTRY(cpu_arm720_switch_mm) ENTRY(cpu_arm720_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER @ User? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young movne r2, #0 str r2, [r0] @ hardware version --- linux-2.6.0-test5/arch/arm/mm/proc-arm920.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm920.S 2003-09-18 21:02:00.000000000 -0700 @@ -1,5 +1,5 @@ /* - * linux/arch/arm/mm/arm920.S: MMU functions for ARM920 + * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -333,19 +334,19 @@ ENTRY(cpu_arm920_switch_mm) ENTRY(cpu_arm920_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER @ User or Exec? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young? 
+ tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? movne r2, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -361,7 +362,7 @@ ENTRY(cpu_arm920_set_pte) ENTRY(cpu_arm920_name) - .ascii "Arm920T" + .ascii "ARM920T" #ifndef CONFIG_CPU_ICACHE_DISABLE .ascii "i" #endif --- linux-2.6.0-test5/arch/arm/mm/proc-arm922.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm922.S 2003-09-18 21:02:00.000000000 -0700 @@ -1,5 +1,5 @@ /* - * linux/arch/arm/mm/arm922.S: MMU functions for ARM922 + * linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922 * * Copyright (C) 1999,2000 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -337,19 +338,19 @@ ENTRY(cpu_arm922_switch_mm) ENTRY(cpu_arm922_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER @ User? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young? + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? movne r2, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -365,7 +366,7 @@ ENTRY(cpu_arm922_set_pte) ENTRY(cpu_arm922_name) - .ascii "Arm922T" + .ascii "ARM922T" #ifndef CONFIG_CPU_ICACHE_DISABLE .ascii "i" #endif --- linux-2.6.0-test5/arch/arm/mm/proc-arm926.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-arm926.S 2003-09-18 21:02:00.000000000 -0700 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -337,19 +338,19 @@ ENTRY(cpu_arm926_switch_mm) ENTRY(cpu_arm926_set_pte) str r1, [r0], #-2048 @ linux version - eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY + eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 - orr r2, r2, #HPTE_TYPE_SMALL + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK + orr r2, r2, #PTE_TYPE_SMALL - tst r1, #LPTE_USER @ User? - orrne r2, r2, #HPTE_AP_READ + tst r1, #L_PTE_USER @ User? + orrne r2, r2, #PTE_SMALL_AP_URO_SRW - tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? - orreq r2, r2, #HPTE_AP_WRITE + tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? + orreq r2, r2, #PTE_SMALL_AP_UNO_SRW - tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young? + tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? 
movne r2, #0 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -436,7 +437,7 @@ __arm926_setup: */ .type arm926_processor_functions, #object arm926_processor_functions: - .word v5tej_early_abort + .word v5tj_early_abort .word cpu_arm926_proc_init .word cpu_arm926_proc_fin .word cpu_arm926_reset @@ -461,8 +462,8 @@ cpu_elf_name: .type __arm926_proc_info,#object __arm926_proc_info: - .long 0x41009260 - .long 0xff00fff0 + .long 0x41069260 @ ARM926EJ-S (v5TEJ) + .long 0xff0ffff0 .long 0x00000c1e @ mmuflags b __arm926_setup .long cpu_arch_name --- linux-2.6.0-test5/arch/arm/mm/proc-sa1100.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-sa1100.S 2003-09-18 21:02:00.000000000 -0700 @@ -1,5 +1,5 @@ /* - * linux/arch/arm/mm/proc-sa110.S + * linux/arch/arm/mm/proc-sa1100.S * * Copyright (C) 1997-2002 Russell King * @@ -187,11 +187,11 @@ ENTRY(cpu_sa1100_set_pte) eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK orr r2, r2, #PTE_TYPE_SMALL - tst r1, #L_PTE_USER @ User or Exec? + tst r1, #L_PTE_USER @ User? orrne r2, r2, #PTE_SMALL_AP_URO_SRW tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? --- linux-2.6.0-test5/arch/arm/mm/proc-sa110.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-sa110.S 2003-09-18 21:02:00.000000000 -0700 @@ -163,11 +163,11 @@ ENTRY(cpu_sa110_set_pte) eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY - bic r2, r1, #0xff0 - bic r2, r2, #3 + bic r2, r1, #PTE_SMALL_AP_MASK + bic r2, r2, #PTE_TYPE_MASK orr r2, r2, #PTE_TYPE_SMALL - tst r1, #L_PTE_USER @ User or Exec? + tst r1, #L_PTE_USER @ User? orrne r2, r2, #PTE_SMALL_AP_URO_SRW tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? --- linux-2.6.0-test5/arch/arm/mm/proc-xscale.S 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/arm/mm/proc-xscale.S 2003-09-18 21:02:00.000000000 -0700 @@ -236,6 +236,9 @@ ENTRY(xscale_flush_user_cache_range) * * - start - virtual start address * - end - virtual end address + * + * Note: single I-cache line invalidation isn't used here since + * it also trashes the mini I-cache used by JTAG debuggers. */ ENTRY(xscale_coherent_kern_range) bic r0, r0, #CACHELINESIZE - 1 @@ -612,7 +615,7 @@ __xscale_setup: .type xscale_processor_functions, #object ENTRY(xscale_processor_functions) - .word xscale_abort + .word v5t_early_abort .word cpu_xscale_proc_init .word cpu_xscale_proc_fin .word cpu_xscale_reset --- linux-2.6.0-test5/arch/arm/mm/tlb-v4.S 2003-06-14 12:18:06.000000000 -0700 +++ 25/arch/arm/mm/tlb-v4.S 2003-09-18 21:02:00.000000000 -0700 @@ -34,7 +34,6 @@ ENTRY(v4_flush_user_tlb_range) act_mm r3 @ get current->active_mm eors r3, ip, r3 @ == mm ? 
movne pc, lr @ no, we dont do anything - vma_vm_flags ip, r2 .v4_flush_kern_tlb_range: bic r0, r0, #0x0ff bic r0, r0, #0xf00 --- linux-2.6.0-test5/arch/arm/tools/mach-types 2003-08-08 22:55:10.000000000 -0700 +++ 25/arch/arm/tools/mach-types 2003-09-18 21:02:00.000000000 -0700 @@ -6,7 +6,7 @@ # To add an entry into this database, please see Documentation/arm/README, # or contact rmk@arm.linux.org.uk # -# Last update: Sun Aug 3 16:26:10 2003 +# Last update: Sat Sep 13 00:22:34 2003 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -367,3 +367,20 @@ cx861xx ARCH_CX861XX CX861XX 355 ixp2000 ARCH_IXP2000 IXP2000 356 xda SA1100_XDA XDA 357 csir_ims ARCH_CSIR_IMS CSIR_IMS 358 +ixp421_dnaeeth ARCH_IXP421_DNAEETH IXP421_DNAEETH 359 +pocketserv9200 ARCH_POCKETSERV9200 POCKETSERV9200 360 +toto ARCH_TOTO TOTO 361 +s3c2440 ARCH_S3C2440 S3C2440 362 +ks8695p ARCH_KS8695P KS8695P 363 +se4000 ARCH_SE4000 SE4000 364 +quadriceps ARCH_QUADRICEPS QUADRICEPS 365 +bronco ARCH_BRONCO BRONCO 366 +esl_wireless_tab ARCH_ESL_WIRELESS_TABLETESL_WIRELESS_TABLET 367 +esl_sofcomp ARCH_ESL_SOFCOMP ESL_SOFCOMP 368 +s5c7375 ARCH_S5C7375 S5C7375 369 +spearhead ARCH_SPEARHEAD SPEARHEAD 370 +pantera ARCH_PANTERA PANTERA 371 +prayoglite ARCH_PRAYOGLITE PRAYOGLITE 372 +gumstik ARCH_GUMSTIK GUMSTIK 373 +rcube ARCH_RCUBE RCUBE 374 +rea_olv ARCH_REA_OLV REA_OLV 375 --- linux-2.6.0-test5/arch/cris/arch-v10/boot/compressed/misc.c 2003-07-10 18:50:30.000000000 -0700 +++ 25/arch/cris/arch-v10/boot/compressed/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -115,7 +115,7 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); + if (size <0) error("Malloc error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ --- linux-2.6.0-test5/arch/cris/mm/ioremap.c 2003-07-10 18:50:30.000000000 -0700 +++ 25/arch/cris/mm/ioremap.c 2003-09-18 21:38:20.000000000 -0700 @@ -157,7 +157,7 @@ void * __ioremap(unsigned long phys_addr if (!area) return NULL; addr = area->addr; - if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) { + if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { vfree(addr); return NULL; } --- linux-2.6.0-test5/arch/h8300/kernel/signal.c 2003-08-22 19:23:40.000000000 -0700 +++ 25/arch/h8300/kernel/signal.c 2003-09-18 21:02:26.000000000 -0700 @@ -593,7 +593,7 @@ asmlinkage int do_signal(sigset_t *oldse continue; case SIGTSTP: case SIGTTIN: case SIGTTOU: - if (is_orphaned_pgrp(current->pgrp)) + if (is_orphaned_pgrp(process_group(current))) continue; /* FALLTHRU */ --- linux-2.6.0-test5/arch/i386/boot98/compressed/misc.c 2003-06-14 12:18:01.000000000 -0700 +++ 25/arch/i386/boot98/compressed/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -132,8 +132,8 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); - if (free_mem_ptr <= 0) error("Memory error\n"); + if (size <0) error("Malloc error"); + if (free_mem_ptr <= 0) error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -141,7 +141,7 @@ static void *malloc(int size) free_mem_ptr += size; if (free_mem_ptr >= free_mem_end_ptr) - error("\nOut of memory\n"); + error("Out of memory"); return p; } @@ -232,7 +232,7 @@ static void* memcpy(void* __dest, __cons static int fill_inbuf(void) { if (insize != 0) { - error("ran out of input data\n"); + error("ran out of input data"); } inbuf = input_data; @@ -306,9 +306,9 @@ struct { static void setup_normal_output_buffer(void) { #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < 1024) error("Less than 2MB of memory.\n"); + if 
(EXT_MEM_K < 1024) error("Less than 2MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory.\n"); + if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); #endif output_data = (char *)0x100000; /* Points to 1M */ free_mem_end_ptr = (long)real_mode; @@ -323,9 +323,9 @@ static void setup_output_buffer_if_we_ru { high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory.\n"); + if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory.\n"); + if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); #endif mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX --- linux-2.6.0-test5/arch/i386/boot/compressed/misc.c 2003-06-14 12:18:23.000000000 -0700 +++ 25/arch/i386/boot/compressed/misc.c 2003-09-18 21:02:00.000000000 -0700 @@ -132,8 +132,8 @@ static void *malloc(int size) { void *p; - if (size <0) error("Malloc error\n"); - if (free_mem_ptr <= 0) error("Memory error\n"); + if (size <0) error("Malloc error"); + if (free_mem_ptr <= 0) error("Memory error"); free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ @@ -141,7 +141,7 @@ static void *malloc(int size) free_mem_ptr += size; if (free_mem_ptr >= free_mem_end_ptr) - error("\nOut of memory\n"); + error("Out of memory"); return p; } @@ -232,7 +232,7 @@ static void* memcpy(void* __dest, __cons static int fill_inbuf(void) { if (insize != 0) { - error("ran out of input data\n"); + error("ran out of input data"); } inbuf = input_data; @@ -306,9 +306,9 @@ struct { static void setup_normal_output_buffer(void) { #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < 1024) error("Less than 2MB of memory.\n"); + if (EXT_MEM_K < 1024) error("Less than 2MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory.\n"); + if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); #endif output_data = (char *)0x100000; /* Points to 1M */ free_mem_end_ptr = (long)real_mode; @@ -323,9 +323,9 @@ static void setup_output_buffer_if_we_ru { high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); #ifdef STANDARD_MEMORY_BIOS_CALL - if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory.\n"); + if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); #else - if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory.\n"); + if ((ALT_MEM_K > EXT_MEM_K ? 
ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); #endif mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX --- linux-2.6.0-test5/arch/i386/boot/Makefile 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/i386/boot/Makefile 2003-09-18 21:02:19.000000000 -0700 @@ -99,4 +99,4 @@ zlilo: $(BOOTIMAGE) if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi install: $(BOOTIMAGE) - sh $(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" + sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)" --- linux-2.6.0-test5/arch/i386/boot/setup.S 2003-06-14 12:18:22.000000000 -0700 +++ 25/arch/i386/boot/setup.S 2003-09-18 22:02:56.000000000 -0700 @@ -162,7 +162,7 @@ cmd_line_ptr: .long 0 # (Header versio # can be located anywhere in # low memory 0x10000 or higher. -ramdisk_max: .long MAXMEM-1 # (Header version 0x0203 or later) +ramdisk_max: .long __MAXMEM-1 # (Header version 0x0203 or later) # The highest safe address for # the contents of an initrd @@ -506,6 +506,17 @@ no_voyager: movw $0xAA, (0x1ff) # device present no_psmouse: +#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) + movl $0x0000E980, %eax # IST Support + movl $0x47534943, %edx # Request value + int $0x15 + + movl %eax, (96) + movl %ebx, (100) + movl %ecx, (104) + movl %edx, (108) +#endif + #if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) # Then check for an APM BIOS... # %ds points to the bootsector --- linux-2.6.0-test5/arch/i386/Kconfig 2003-09-08 13:58:55.000000000 -0700 +++ 25/arch/i386/Kconfig 2003-09-18 22:02:56.000000000 -0700 @@ -397,6 +397,54 @@ config X86_OOSTORE depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 default y +config X86_4G + bool "4 GB kernel-space and 4 GB user-space virtual memory support" + help + This option is only useful for systems that have more than 1 GB + of RAM. + + The default kernel VM layout leaves 1 GB of virtual memory for + kernel-space mappings, and 3 GB of VM for user-space applications. + This option ups both the kernel-space VM and the user-space VM to + 4 GB. + + The cost of this option is additional TLB flushes done at + system-entry points that transition from user-mode into kernel-mode. + I.e. system calls and page faults, and IRQs that interrupt user-mode + code. There's also additional overhead to kernel operations that copy + memory to/from user-space. The overhead from this is hard to tell and + depends on the workload - it can be anything from no visible overhead + to 20-30% overhead. A good rule of thumb is to count with a runtime + overhead of 20%. + + The upside is the much increased kernel-space VM, which more than + quadruples the maximum amount of RAM supported. Kernels compiled with + this option boot on 64GB of RAM and still have more than 3.1 GB of + 'lowmem' left. Another bonus is that highmem IO bouncing decreases, + if used with drivers that still use bounce-buffers. + + There's also a 33% increase in user-space VM size - database + applications might see a boost from this. + + But the cost of the TLB flushes and the runtime overhead has to be + weighed against the bonuses offered by the larger VM spaces. The + dividing line depends on the actual workload - there might be 4 GB + systems that benefit from this option. Systems with less than 4 GB + of RAM will rarely see a benefit from this option - but it's not + out of question, the exact circumstances have to be considered. 
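
As a rough illustration of the address-space arithmetic in the X86_4G help text above (a sketch only, not part of the patch), the small userspace C program below prints the user/kernel virtual-memory sizes for the default i386 3 GB/1 GB split and for the 4 GB/4 GB layout; the 33% user-VM increase quoted in the help text falls out directly. The 3 GB/1 GB and 4 GB/4 GB figures come from the help text; the TASK_SIZE/PAGE_OFFSET names in the comments refer to the usual i386 defaults.

/*
 * Illustration only -- not part of the patch.  Prints the user/kernel
 * VM sizes for the default i386 3G/1G split and for the 4G/4G layout
 * that CONFIG_X86_4G selects (figures taken from the help text above).
 */
#include <stdio.h>

int main(void)
{
	const unsigned long long GB = 1ULL << 30;

	/* Default layout: user and kernel share one 4 GB address space. */
	unsigned long long def_user   = 3 * GB;	/* the usual TASK_SIZE      */
	unsigned long long def_kernel = 1 * GB;	/* PAGE_OFFSET .. 4 GB      */

	/* X86_4G layout: two separate 4 GB spaces, switched on kernel entry. */
	unsigned long long x4g_user   = 4 * GB;
	unsigned long long x4g_kernel = 4 * GB;

	printf("default: user %llu MB, kernel %llu MB\n",
	       def_user >> 20, def_kernel >> 20);
	printf("X86_4G : user %llu MB, kernel %llu MB (+%llu%% user VM)\n",
	       x4g_user >> 20, x4g_kernel >> 20,
	       (x4g_user - def_user) * 100 / def_user);
	return 0;
}

Built with any C compiler (e.g. gcc -o vmsplit vmsplit.c), it prints 3072/1024 MB for the default layout and 4096/4096 MB with X86_4G, i.e. a 33% larger user VM; the extra kernel-space VM is what lets such kernels keep several gigabytes of lowmem, at the price of the per-transition TLB flushes described above.
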
+ +config X86_SWITCH_PAGETABLES + def_bool X86_4G + +config X86_4G_VM_LAYOUT + def_bool X86_4G + +config X86_UACCESS_INDIRECT + def_bool X86_4G + +config X86_HIGH_ENTRY + def_bool X86_4G + config HUGETLB_PAGE bool "Huge TLB Page Support" help @@ -454,6 +502,7 @@ config SMP config NR_CPUS int "Maximum number of CPUs (2-255)" depends on SMP + default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 default "8" help This allows you to specify the maximum number of CPUs which this @@ -814,49 +863,7 @@ endmenu menu "Power management options (ACPI, APM)" depends on !X86_VOYAGER -config PM - bool "Power Management support" - ---help--- - "Power Management" means that parts of your computer are shut - off or put into a power conserving "sleep" mode if they are not - being used. There are two competing standards for doing this: APM - and ACPI. If you want to use either one, say Y here and then also - to the requisite support below. - - Power Management is most important for battery powered laptop - computers; if you have a laptop, check out the Linux Laptop home - page on the WWW at - and the - Battery Powered Linux mini-HOWTO, available from - . - - Note that, even if you say N here, Linux on the x86 architecture - will issue the hlt instruction if nothing is to be done, thereby - sending the processor to sleep and saving power. - -config SOFTWARE_SUSPEND - bool "Software Suspend (EXPERIMENTAL)" - depends on EXPERIMENTAL && PM && SWAP - ---help--- - Enable the possibilty of suspendig machine. It doesn't need APM. - You may suspend your machine by 'swsusp' or 'shutdown -z