diff -Nru a/Documentation/usb/ov511.txt b/Documentation/usb/ov511.txt --- a/Documentation/usb/ov511.txt Sat Jul 20 12:12:34 2002 +++ b/Documentation/usb/ov511.txt Sat Jul 20 12:12:34 2002 @@ -128,16 +128,6 @@ programs that expect RGB data (e.g. gqcam) to work with this driver. If your colors look VERY wrong, you may want to change this. - NAME: buf_timeout (Temporarily disabled. Memory is deallocated immediately) - TYPE: integer - DEFAULT: 5 (seconds) - DESC: Number of seconds before unused frame buffers are deallocated. - Previously, memory was allocated upon open() and deallocated upon - close(). Deallocation now occurs only if the driver is closed and this - timeout is reached. If you are capturing frames less frequently than - the default timeout, increase this. This will not make any difference - with programs that capture multiple frames during an open/close cycle. - NAME: cams TYPE: integer (1-4 for OV511, 1-31 for OV511+) DEFAULT: 1 @@ -161,13 +151,6 @@ DESC: This configures the camera's sensor to transmit a colored test-pattern instead of an image. This does not work correctly yet. - NAME: sensor_gbr (*** TEMPORARILY DISABLED ***) - TYPE: integer (Boolean) - DEFAULT: 0 - DESC: This makes the sensor output GBR422 instead of YUV420. This saves the - driver the trouble of converting YUV to RGB, but it currently does not - work very well (the colors are not quite right) - NAME: dumppix TYPE: integer (0-2) DEFAULT: 0 @@ -258,14 +241,6 @@ 10 VIDEO_PALETTE_YUV420 (YUV 4:2:0 Planar) 13 VIDEO_PALETTE_YUV422P (YUV 4:2:2 Planar) 15 VIDEO_PALETTE_YUV420P (YUV 4:2:0 Planar, same as 10) - - NAME: tuner - TYPE: integer - DEFAULT: -1 (autodetect) - DESC: This sets the exact type of the tuner module in a device. This is set - automatically based on the custom ID of the OV511 device. In cases - where this fails, you can override this auto-detection. Please see - linux/drivers/media/video/tuner.h for a complete list. 
NAME: backlight TYPE: integer (Boolean) diff -Nru a/Makefile b/Makefile --- a/Makefile Sat Jul 20 12:12:34 2002 +++ b/Makefile Sat Jul 20 12:12:34 2002 @@ -1,6 +1,6 @@ VERSION = 2 PATCHLEVEL = 5 -SUBLEVEL = 26 +SUBLEVEL = 27 EXTRAVERSION = # *DOCUMENTATION* @@ -157,7 +157,8 @@ export srctree objtree -SUBDIRS := init kernel mm fs ipc lib drivers sound net +SUBDIRS := init kernel mm fs ipc lib drivers sound net security + noconfig_targets := xconfig menuconfig config oldconfig randconfig \ defconfig allyesconfig allnoconfig allmodconfig \ @@ -223,7 +224,7 @@ # --------------------------------------------------------------------------- INIT := init/init.o -CORE_FILES := kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o +CORE_FILES := kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o security/built-in.o LIBS := lib/lib.a DRIVERS := drivers/built-in.o sound/sound.o NETWORKS := net/network.o diff -Nru a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c --- a/arch/arm/kernel/isa.c Sat Jul 20 12:12:35 2002 +++ b/arch/arm/kernel/isa.c Sat Jul 20 12:12:35 2002 @@ -38,7 +38,7 @@ static struct ctl_table_header *isa_sysctl_header; -static ctl_table ctl_isa[2] = {{BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars}, +static ctl_table ctl_isa[2] = {{CTL_BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars}, {0}}; static ctl_table ctl_bus[2] = {{CTL_BUS, "bus", NULL, 0, 0555, ctl_isa}, {0}}; diff -Nru a/arch/i386/config.in b/arch/i386/config.in --- a/arch/i386/config.in Sat Jul 20 12:12:35 2002 +++ b/arch/i386/config.in Sat Jul 20 12:12:35 2002 @@ -423,4 +423,5 @@ endmenu +source security/Config.in source lib/Config.in diff -Nru a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S --- a/arch/i386/kernel/entry.S Sat Jul 20 12:12:35 2002 +++ b/arch/i386/kernel/entry.S Sat Jul 20 12:12:35 2002 @@ -744,7 +744,7 @@ .long sys_getdents64 /* 220 */ .long sys_fcntl64 .long sys_ni_syscall /* reserved for TUX */ - .long sys_ni_syscall /* reserved for Security */ + .long sys_security /* reserved for Security */ .long 
sys_gettid .long sys_readahead /* 225 */ .long sys_setxattr diff -Nru a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c --- a/arch/i386/kernel/ptrace.c Sat Jul 20 12:12:35 2002 +++ b/arch/i386/kernel/ptrace.c Sat Jul 20 12:12:35 2002 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -158,6 +159,9 @@ if (request == PTRACE_TRACEME) { /* are we already being traced? */ if (current->ptrace & PT_PTRACED) + goto out; + ret = security_ops->ptrace(current->parent, current); + if (ret) goto out; /* set the ptrace bit in the process flags. */ current->ptrace |= PT_PTRACED; diff -Nru a/drivers/char/agp/Config.help b/drivers/char/agp/Config.help --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/Config.help Sat Jul 20 12:12:35 2002 @@ -0,0 +1,88 @@ +CONFIG_AGP + AGP (Accelerated Graphics Port) is a bus system mainly used to + connect graphics cards to the rest of the system. + + If you have an AGP system and you say Y here, it will be possible to + use the AGP features of your 3D rendering video card. This code acts + as a sort of "AGP driver" for the motherboard's chipset. + + If you need more texture memory than you can get with the AGP GART + (theoretically up to 256 MB, but in practice usually 64 or 128 MB + due to kernel allocation issues), you could use PCI accesses + and have up to a couple gigs of texture space. + + Note that this is the only means to have XFree4/GLX use + write-combining with MTRR support on the AGP bus. Without it, OpenGL + direct rendering will be a lot slower but still faster than PIO. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + + This driver is available as a module. If you want to compile it as + a module, say M here and read . The + module will be called agpgart.o. + +CONFIG_AGP_INTEL + This option gives you AGP support for the GLX component of the + XFree86 4.x on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850 and 860 chipsets. 
+ + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + +CONFIG_AGP_I810 + This option gives you AGP support for the Xserver on the Intel 810 + 815 and 830m chipset boards for their on-board integrated graphics. This + is required to do any useful video modes with these boards. + +CONFIG_AGP_I460 + This option gives you AGP GART support for the Intel 460GX chipset + for IA64 processors. + +CONFIG_AGP_VIA + This option gives you AGP support for the GLX component of the + XFree86 4.x on VIA MPV3/Apollo Pro chipsets. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + +CONFIG_AGP_AMD + This option gives you AGP support for the GLX component of the + XFree86 4.x on AMD Irongate, 761, and 762 chipsets. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + +CONFIG_AGP_SIS + This option gives you AGP support for the GLX component of the "soon + to be released" XFree86 4.x on Silicon Integrated Systems [SiS] + chipsets. + + Note that 5591/5592 AGP chipsets are NOT supported. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. If unsure, say N. + +CONFIG_AGP_SWORKS + Say Y here to support the Serverworks AGP card. See + for product descriptions and images. + +CONFIG_AGP_ALI + This option gives you AGP support for the GLX component of the + XFree86 4.x on the following ALi chipsets. The supported chipsets + include M1541, M1621, M1631, M1632, M1641,M1647,and M1651. + For the ALi-chipset question, ALi suggests you refer to + . + + The M1541 chipset can do AGP 1x and 2x, but note that there is an + acknowledged incompatibility with Matrox G200 cards. Due to + timing issues, this chipset cannot do AGP 2x with the G200. + This is a hardware limitation. AGP 1x seems to be fine, though. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. 
If unsure, say N. + +CONFIG_AGP_HP_ZX1 + This option gives you AGP GART support for the HP ZX1 chipset + for IA64 processors. + + diff -Nru a/drivers/char/agp/Config.in b/drivers/char/agp/Config.in --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/Config.in Sat Jul 20 12:12:35 2002 @@ -0,0 +1,14 @@ +dep_tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP +if [ "$CONFIG_AGP" != "n" ]; then + bool ' Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support' CONFIG_AGP_INTEL + bool ' Intel I810/I815/I830M (on-board) support' CONFIG_AGP_I810 + bool ' VIA chipset support' CONFIG_AGP_VIA + bool ' AMD Irongate, 761, and 762 support' CONFIG_AGP_AMD + bool ' Generic SiS support' CONFIG_AGP_SIS + bool ' ALI chipset support' CONFIG_AGP_ALI + bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS + if [ "$CONFIG_IA64" = "y" ]; then + bool ' Intel 460GX support' CONFIG_AGP_I460 + bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 + fi +fi diff -Nru a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile --- a/drivers/char/agp/Makefile Sat Jul 20 12:12:35 2002 +++ b/drivers/char/agp/Makefile Sat Jul 20 12:12:35 2002 @@ -3,9 +3,20 @@ # space ioctl interface to use agp memory. It also adds a kernel interface # that other drivers could use to manipulate agp memory. 
-export-objs := agpgart_be.o +export-objs := agp.o -agpgart-objs := agpgart_fe.o agpgart_be.o +agpgart-y := agp.o frontend.o + +agpgart-$(CONFIG_AGP_INTEL) += i8x0-agp.o +agpgart-$(CONFIG_AGP_I810) += i810-agp.o +agpgart-$(CONFIG_AGP_VIA) += via-agp.o +agpgart-$(CONFIG_AGP_AMD) += amd-agp.o +agpgart-$(CONFIG_AGP_SIS) += sis-agp.o +agpgart-$(CONFIG_AGP_ALI) += ali-agp.o +agpgart-$(CONFIG_AGP_SWORKS) += sworks-agp.o +agpgart-$(CONFIG_AGP_I460) += i460-agp.o +agpgart-$(CONFIG_AGP_HP_ZX1) += hp-agp.o +agpgart-objs := $(agpgart-y) obj-$(CONFIG_AGP) += agpgart.o diff -Nru a/drivers/char/agp/agp.c b/drivers/char/agp/agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/agp.c Sat Jul 20 12:12:34 2002 @@ -0,0 +1,1662 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "agp.h" + +MODULE_AUTHOR("Jeff Hartmann "); +MODULE_PARM(agp_try_unsupported, "1i"); +MODULE_LICENSE("GPL and additional rights"); +EXPORT_SYMBOL(agp_free_memory); +EXPORT_SYMBOL(agp_allocate_memory); +EXPORT_SYMBOL(agp_copy_info); +EXPORT_SYMBOL(agp_bind_memory); +EXPORT_SYMBOL(agp_unbind_memory); +EXPORT_SYMBOL(agp_enable); +EXPORT_SYMBOL(agp_backend_acquire); +EXPORT_SYMBOL(agp_backend_release); + +struct agp_bridge_data agp_bridge = { type: NOT_SUPPORTED }; +static int agp_try_unsupported __initdata = 0; + +int agp_backend_acquire(void) +{ + if (agp_bridge.type == NOT_SUPPORTED) + return -EINVAL; + + atomic_inc(&agp_bridge.agp_in_use); + + if (atomic_read(&agp_bridge.agp_in_use) != 1) { + atomic_dec(&agp_bridge.agp_in_use); + return -EBUSY; + } + MOD_INC_USE_COUNT; + return 0; +} + +void agp_backend_release(void) +{ + if (agp_bridge.type == NOT_SUPPORTED) + return; + + atomic_dec(&agp_bridge.agp_in_use); + MOD_DEC_USE_COUNT; +} + +/* + * Generic routines for handling agp_memory structures - + * They use the basic page allocation routines to do the + * brunt of the work. 
+ */ + + +void agp_free_key(int key) +{ + + if (key < 0) + return; + + if (key < MAXKEY) + clear_bit(key, agp_bridge.key_list); +} + +static int agp_get_key(void) +{ + int bit; + + bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); + if (bit < MAXKEY) { + set_bit(bit, agp_bridge.key_list); + return bit; + } + return -1; +} + +agp_memory *agp_create_memory(int scratch_pages) +{ + agp_memory *new; + + new = kmalloc(sizeof(agp_memory), GFP_KERNEL); + + if (new == NULL) + return NULL; + + memset(new, 0, sizeof(agp_memory)); + new->key = agp_get_key(); + + if (new->key < 0) { + kfree(new); + return NULL; + } + new->memory = vmalloc(PAGE_SIZE * scratch_pages); + + if (new->memory == NULL) { + agp_free_key(new->key); + kfree(new); + return NULL; + } + new->num_scratch_pages = scratch_pages; + return new; +} + +void agp_free_memory(agp_memory * curr) +{ + int i; + + if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) + return; + + if (curr->is_bound == TRUE) + agp_unbind_memory(curr); + + if (curr->type != 0) { + agp_bridge.free_by_type(curr); + return; + } + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + curr->memory[i] &= ~(0x00000fff); + agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i])); + } + } + agp_free_key(curr->key); + vfree(curr->memory); + kfree(curr); + MOD_DEC_USE_COUNT; +} + +#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) + +agp_memory *agp_allocate_memory(size_t page_count, u32 type) +{ + int scratch_pages; + agp_memory *new; + int i; + + if (agp_bridge.type == NOT_SUPPORTED) + return NULL; + + if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) > + agp_bridge.max_memory_agp) { + return NULL; + } + + if (type != 0) { + new = agp_bridge.alloc_by_type(page_count, type); + return new; + } + /* We always increase the module count, since free auto-decrements + * it + */ + + MOD_INC_USE_COUNT; + + scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; + + new = 
agp_create_memory(scratch_pages); + + if (new == NULL) { + MOD_DEC_USE_COUNT; + return NULL; + } + + for (i = 0; i < page_count; i++) { + void *addr = agp_bridge.agp_alloc_page(); + + if (addr == NULL) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = agp_bridge.mask_memory(virt_to_phys(addr), type); + new->page_count++; + } + + flush_agp_mappings(); + + return new; +} + +/* End - Generic routines for handling agp_memory structures */ + +static int agp_return_size(void) +{ + int current_size; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + current_size = A_SIZE_8(temp)->size; + break; + case U16_APER_SIZE: + current_size = A_SIZE_16(temp)->size; + break; + case U32_APER_SIZE: + current_size = A_SIZE_32(temp)->size; + break; + case LVL2_APER_SIZE: + current_size = A_SIZE_LVL2(temp)->size; + break; + case FIXED_APER_SIZE: + current_size = A_SIZE_FIX(temp)->size; + break; + default: + current_size = 0; + break; + } + + return current_size; +} + +/* Routine to copy over information structure */ + +void agp_copy_info(agp_kern_info * info) +{ + unsigned long page_mask = 0; + int i; + + memset(info, 0, sizeof(agp_kern_info)); + if (agp_bridge.type == NOT_SUPPORTED) { + info->chipset = agp_bridge.type; + return; + } + info->version.major = agp_bridge.version->major; + info->version.minor = agp_bridge.version->minor; + info->device = agp_bridge.dev; + info->chipset = agp_bridge.type; + info->mode = agp_bridge.mode; + info->aper_base = agp_bridge.gart_bus_addr; + info->aper_size = agp_return_size(); + info->max_memory = agp_bridge.max_memory_agp; + info->current_memory = atomic_read(&agp_bridge.current_memory_agp); + info->cant_use_aperture = agp_bridge.cant_use_aperture; + + for(i = 0; i < agp_bridge.num_of_masks; i++) + page_mask |= agp_bridge.mask_memory(page_mask, i); + + info->page_mask = ~page_mask; +} + +/* End - Routine to copy over information structure */ + +/* + 
* Routines for handling swapping of agp_memory into the GATT - + * These routines take agp_memory and insert them into the GATT. + * They call device specific routines to actually write to the GATT. + */ + +int agp_bind_memory(agp_memory * curr, off_t pg_start) +{ + int ret_val; + + if ((agp_bridge.type == NOT_SUPPORTED) || + (curr == NULL) || (curr->is_bound == TRUE)) { + return -EINVAL; + } + if (curr->is_flushed == FALSE) { + CACHE_FLUSH(); + curr->is_flushed = TRUE; + } + ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); + + if (ret_val != 0) + return ret_val; + + curr->is_bound = TRUE; + curr->pg_start = pg_start; + return 0; +} + +int agp_unbind_memory(agp_memory * curr) +{ + int ret_val; + + if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) + return -EINVAL; + + if (curr->is_bound != TRUE) + return -EINVAL; + + ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); + + if (ret_val != 0) + return ret_val; + + curr->is_bound = FALSE; + curr->pg_start = 0; + return 0; +} + +/* End - Routines for handling swapping of agp_memory into the GATT */ + +/* + * Driver routines - start + * Currently this module supports the following chipsets: + * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3, + * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541, + * and generic support for the SiS chipsets. + */ + +/* Generic Agp routines - Start */ + +void agp_generic_agp_enable(u32 mode) +{ + struct pci_dev *device = NULL; + u32 command, scratch; + u8 cap_ptr; + + pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command); + + /* + * PASS1: go throu all devices that claim to be + * AGP devices and collect their data. + */ + + + pci_for_each_dev(device) { + cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); + if (cap_ptr != 0x00) { + /* + * Ok, here we have a AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. 
+ */ + + pci_read_config_dword(device, cap_ptr + 4, &scratch); + + /* adjust RQ depth */ + command = ((command & ~0xff000000) | + min_t(u32, (mode & 0xff000000), + min_t(u32, (command & 0xff000000), + (scratch & 0xff000000)))); + + /* disable SBA if it's not supported */ + if (!((command & 0x00000200) && + (scratch & 0x00000200) && + (mode & 0x00000200))) + command &= ~0x00000200; + + /* disable FW if it's not supported */ + if (!((command & 0x00000010) && + (scratch & 0x00000010) && + (mode & 0x00000010))) + command &= ~0x00000010; + + if (!((command & 4) && + (scratch & 4) && + (mode & 4))) + command &= ~0x00000004; + + if (!((command & 2) && + (scratch & 2) && + (mode & 2))) + command &= ~0x00000002; + + if (!((command & 1) && + (scratch & 1) && + (mode & 1))) + command &= ~0x00000001; + } + } + /* + * PASS2: Figure out the 4X/2X/1X setting and enable the + * target (our motherboard chipset). + */ + + if (command & 4) + command &= ~3; /* 4X */ + + if (command & 2) + command &= ~5; /* 2X */ + + if (command & 1) + command &= ~6; /* 1X */ + + command |= 0x00000100; + + pci_write_config_dword(agp_bridge.dev, + agp_bridge.capndx + 8, + command); + + /* + * PASS3: Go throu all AGP devices and update the + * command registers. 
+ */ + + pci_for_each_dev(device) { + cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); + if (cap_ptr != 0x00) + pci_write_config_dword(device, cap_ptr + 8, command); + } +} + +int agp_generic_create_gatt_table(void) +{ + char *table; + char *table_end; + int size; + int page_order; + int num_entries; + int i; + void *temp; + struct page *page; + + /* The generic routines can't handle 2 level gatt's */ + if (agp_bridge.size_type == LVL2_APER_SIZE) { + return -EINVAL; + } + + table = NULL; + i = agp_bridge.aperture_size_idx; + temp = agp_bridge.current_size; + size = page_order = num_entries = 0; + + if (agp_bridge.size_type != FIXED_APER_SIZE) { + do { + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + size = A_SIZE_8(temp)->size; + page_order = + A_SIZE_8(temp)->page_order; + num_entries = + A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + size = A_SIZE_16(temp)->size; + page_order = A_SIZE_16(temp)->page_order; + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + size = A_SIZE_32(temp)->size; + page_order = A_SIZE_32(temp)->page_order; + num_entries = A_SIZE_32(temp)->num_entries; + break; + /* This case will never really happen. */ + case FIXED_APER_SIZE: + case LVL2_APER_SIZE: + default: + size = page_order = num_entries = 0; + break; + } + + table = (char *) __get_free_pages(GFP_KERNEL, + page_order); + + if (table == NULL) { + i++; + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + agp_bridge.current_size = A_IDX8(); + break; + case U16_APER_SIZE: + agp_bridge.current_size = A_IDX16(); + break; + case U32_APER_SIZE: + agp_bridge.current_size = A_IDX32(); + break; + /* This case will never really + * happen. 
+ */ + case FIXED_APER_SIZE: + case LVL2_APER_SIZE: + default: + agp_bridge.current_size = + agp_bridge.current_size; + break; + } + temp = agp_bridge.current_size; + } else { + agp_bridge.aperture_size_idx = i; + } + } while ((table == NULL) && + (i < agp_bridge.num_aperture_sizes)); + } else { + size = ((struct aper_size_info_fixed *) temp)->size; + page_order = ((struct aper_size_info_fixed *) temp)->page_order; + num_entries = ((struct aper_size_info_fixed *) temp)->num_entries; + table = (char *) __get_free_pages(GFP_KERNEL, page_order); + } + + if (table == NULL) + return -ENOMEM; + + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + SetPageReserved(page); + + agp_bridge.gatt_table_real = (unsigned long *) table; + CACHE_FLUSH(); + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + CACHE_FLUSH(); + + if (agp_bridge.gatt_table == NULL) { + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + ClearPageReserved(page); + + free_pages((unsigned long) table, page_order); + + return -ENOMEM; + } + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) + agp_bridge.gatt_table[i] = (unsigned long) agp_bridge.scratch_page; + + return 0; +} + +int agp_generic_suspend(void) +{ + return 0; +} + +void agp_generic_resume(void) +{ + return; +} + +int agp_generic_free_gatt_table(void) +{ + int page_order; + char *table, *table_end; + void *temp; + struct page *page; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + page_order = A_SIZE_8(temp)->page_order; + break; + case U16_APER_SIZE: + page_order = A_SIZE_16(temp)->page_order; + break; + case U32_APER_SIZE: + page_order = A_SIZE_32(temp)->page_order; + break; + case FIXED_APER_SIZE: + page_order = A_SIZE_FIX(temp)->page_order; + break; + case LVL2_APER_SIZE: + /* The 
generic routines can't deal with 2 level gatt's */ + return -EINVAL; + break; + default: + page_order = 0; + break; + } + + /* Do not worry about freeing memory, because if this is + * called, then all agp memory is deallocated and removed + * from the table. + */ + + iounmap(agp_bridge.gatt_table); + table = (char *) agp_bridge.gatt_table_real; + table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); + + for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) + ClearPageReserved(page); + + free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); + return 0; +} + +int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + + switch (agp_bridge.size_type) { + case U8_APER_SIZE: + num_entries = A_SIZE_8(temp)->num_entries; + break; + case U16_APER_SIZE: + num_entries = A_SIZE_16(temp)->num_entries; + break; + case U32_APER_SIZE: + num_entries = A_SIZE_32(temp)->num_entries; + break; + case FIXED_APER_SIZE: + num_entries = A_SIZE_FIX(temp)->num_entries; + break; + case LVL2_APER_SIZE: + /* The generic routines can't deal with 2 level gatt's */ + return -EINVAL; + break; + default: + num_entries = 0; + break; + } + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory types */ + return -EINVAL; + } + + if ((pg_start + mem->page_count) > num_entries) + return -EINVAL; + + j = pg_start; + + while (j < (pg_start + mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) + agp_bridge.gatt_table[j] = mem->memory[i]; + + agp_bridge.tlb_flush(mem); + return 0; +} + +int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + int i; + + if (type != 0 || mem->type != 0) { + /* The generic routines know nothing of memory 
types */ + return -EINVAL; + } + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + agp_bridge.gatt_table[i] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) +{ + return NULL; +} + +void agp_generic_free_by_type(agp_memory * curr) +{ + if (curr->memory != NULL) + vfree(curr->memory); + + agp_free_key(curr->key); + kfree(curr); +} + +/* + * Basic Page Allocation Routines - + * These routines handle page allocation + * and by default they reserve the allocated + * memory. They also handle incrementing the + * current_memory_agp value, Which is checked + * against a maximum value. + */ + +void *agp_generic_alloc_page(void) +{ + struct page * page; + + page = alloc_page(GFP_KERNEL); + if (page == NULL) + return 0; + + map_page_into_agp(page); + + get_page(page); + SetPageLocked(page); + atomic_inc(&agp_bridge.current_memory_agp); + return page_address(page); +} + +void agp_generic_destroy_page(void *addr) +{ + struct page *page; + + if (addr == NULL) + return; + + page = virt_to_page(addr); + unmap_page_from_agp(page); + put_page(page); + unlock_page(page); + free_page((unsigned long)addr); + atomic_dec(&agp_bridge.current_memory_agp); +} + +/* End Basic Page Allocation Routines */ + +void agp_enable(u32 mode) +{ + if (agp_bridge.type == NOT_SUPPORTED) + return; + agp_bridge.agp_enable(mode); +} + +/* End - Generic Agp routines */ + + +/* per-chipset initialization data. 
+ * note -- all chipsets for a single vendor MUST be grouped together + */ +static struct { + unsigned short device_id; /* first, to make table easier to read */ + unsigned short vendor_id; + enum chipset_type chipset; + const char *vendor_name; + const char *chipset_name; + int (*chipset_setup) (struct pci_dev *pdev); +} agp_bridge_info[] __initdata = { + +#ifdef CONFIG_AGP_ALI + { + device_id: PCI_DEVICE_ID_AL_M1541_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1541, + vendor_name: "Ali", + chipset_name: "M1541", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1621_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1621, + vendor_name: "Ali", + chipset_name: "M1621", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1631_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1631, + vendor_name: "Ali", + chipset_name: "M1631", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1632_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1632, + vendor_name: "Ali", + chipset_name: "M1632", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1641_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1641, + vendor_name: "Ali", + chipset_name: "M1641", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1644_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1644, + vendor_name: "Ali", + chipset_name: "M1644", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1647_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1647, + vendor_name: "Ali", + chipset_name: "M1647", + chipset_setup: ali_generic_setup, + }, + { + device_id: PCI_DEVICE_ID_AL_M1651_0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_M1651, + vendor_name: "Ali", + chipset_name: "M1651", + chipset_setup: ali_generic_setup, + }, + { + device_id: 0, + vendor_id: PCI_VENDOR_ID_AL, + chipset: ALI_GENERIC, + vendor_name: "Ali", + chipset_name: "Generic", + chipset_setup: 
ali_generic_setup, + }, +#endif /* CONFIG_AGP_ALI */ + +#ifdef CONFIG_AGP_AMD + { + device_id: PCI_DEVICE_ID_AMD_IRONGATE_0, + vendor_id: PCI_VENDOR_ID_AMD, + chipset: AMD_IRONGATE, + vendor_name: "AMD", + chipset_name: "Irongate", + chipset_setup: amd_irongate_setup, + }, + { + device_id: PCI_DEVICE_ID_AMD_761_0, + vendor_id: PCI_VENDOR_ID_AMD, + chipset: AMD_761, + vendor_name: "AMD", + chipset_name: "761", + chipset_setup: amd_irongate_setup, + }, + { + device_id: PCI_DEVICE_ID_AMD_762_0, + vendor_id: PCI_VENDOR_ID_AMD, + chipset: AMD_762, + vendor_name: "AMD", + chipset_name: "760MP", + chipset_setup: amd_irongate_setup, + }, + { + device_id: 0, + vendor_id: PCI_VENDOR_ID_AMD, + chipset: AMD_GENERIC, + vendor_name: "AMD", + chipset_name: "Generic", + chipset_setup: amd_irongate_setup, + }, +#endif /* CONFIG_AGP_AMD */ + +#ifdef CONFIG_AGP_INTEL + { + device_id: PCI_DEVICE_ID_INTEL_82443LX_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_LX, + vendor_name: "Intel", + chipset_name: "440LX", + chipset_setup: intel_generic_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_82443BX_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_BX, + vendor_name: "Intel", + chipset_name: "440BX", + chipset_setup: intel_generic_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_82443GX_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_GX, + vendor_name: "Intel", + chipset_name: "440GX", + chipset_setup: intel_generic_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_815_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I815, + vendor_name: "Intel", + chipset_name: "i815", + chipset_setup: intel_815_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_820_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I820, + vendor_name: "Intel", + chipset_name: "i820", + chipset_setup: intel_820_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_820_UP_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I820, + vendor_name: "Intel", + chipset_name: "i820", + chipset_setup: 
intel_820_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_830_M_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I830_M, + vendor_name: "Intel", + chipset_name: "i830M", + chipset_setup: intel_830mp_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_845_G_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I845_G, + vendor_name: "Intel", + chipset_name: "i845G", + chipset_setup: intel_830mp_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_840_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I840, + vendor_name: "Intel", + chipset_name: "i840", + chipset_setup: intel_840_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_845_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I845, + vendor_name: "Intel", + chipset_name: "i845", + chipset_setup: intel_845_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_850_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I850, + vendor_name: "Intel", + chipset_name: "i850", + chipset_setup: intel_850_setup + }, + { + device_id: PCI_DEVICE_ID_INTEL_860_0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_I860, + vendor_name: "Intel", + chipset_name: "i860", + chipset_setup: intel_860_setup + }, + { + device_id: 0, + vendor_id: PCI_VENDOR_ID_INTEL, + chipset: INTEL_GENERIC, + vendor_name: "Intel", + chipset_name: "Generic", + chipset_setup: intel_generic_setup + }, + +#endif /* CONFIG_AGP_INTEL */ + +#ifdef CONFIG_AGP_SIS + { + device_id: PCI_DEVICE_ID_SI_740, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "740", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_650, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "650", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_645, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "645", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_735, + vendor_id: PCI_VENDOR_ID_SI, + 
chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "735", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_745, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "745", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_730, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "730", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_630, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "630", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_540, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "540", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_620, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "620", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_530, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "530", + chipset_setup: sis_generic_setup + }, + { + device_id: PCI_DEVICE_ID_SI_550, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "550", + chipset_setup: sis_generic_setup + }, + { + device_id: 0, + vendor_id: PCI_VENDOR_ID_SI, + chipset: SIS_GENERIC, + vendor_name: "SiS", + chipset_name: "Generic", + chipset_setup: sis_generic_setup + }, +#endif /* CONFIG_AGP_SIS */ + +#ifdef CONFIG_AGP_VIA + { + device_id: PCI_DEVICE_ID_VIA_8501_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_MVP4, + vendor_name: "Via", + chipset_name: "MVP4", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_82C597_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_VP3, + vendor_name: "Via", + chipset_name: "VP3", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_82C598_0, + vendor_id: 
PCI_VENDOR_ID_VIA, + chipset: VIA_MVP3, + vendor_name: "Via", + chipset_name: "MVP3", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_82C691_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_APOLLO_PRO, + vendor_name: "Via", + chipset_name: "Apollo Pro", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_8371_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_APOLLO_KX133, + vendor_name: "Via", + chipset_name: "Apollo Pro KX133", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_8363_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_APOLLO_KT133, + vendor_name: "Via", + chipset_name: "Apollo Pro KT133", + chipset_setup: via_generic_setup + }, + { + device_id: PCI_DEVICE_ID_VIA_8367_0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_APOLLO_KT133, + vendor_name: "Via", + chipset_name: "Apollo Pro KT266", + chipset_setup: via_generic_setup + }, + { + device_id: 0, + vendor_id: PCI_VENDOR_ID_VIA, + chipset: VIA_GENERIC, + vendor_name: "Via", + chipset_name: "Generic", + chipset_setup: via_generic_setup + }, +#endif /* CONFIG_AGP_VIA */ + +#ifdef CONFIG_AGP_HP_ZX1 + { + device_id: PCI_DEVICE_ID_HP_ZX1_LBA, + vendor_id: PCI_VENDOR_ID_HP, + chipset: HP_ZX1, + vendor_name: "HP", + chipset_name: "ZX1", + chipset_setup: hp_zx1_setup + }, +#endif + + { }, /* dummy final entry, always present */ +}; + + +/* scan table above for supported devices */ +static int __init agp_lookup_host_bridge (struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) + if (pdev->vendor == agp_bridge_info[i].vendor_id) + break; + + if (i >= ARRAY_SIZE (agp_bridge_info)) { + printk (KERN_DEBUG PFX "unsupported bridge\n"); + return -ENODEV; + } + + while ((i < ARRAY_SIZE (agp_bridge_info)) && + (agp_bridge_info[i].vendor_id == pdev->vendor)) { + if (pdev->device == agp_bridge_info[i].device_id) { +#ifdef CONFIG_AGP_ALI + if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) { + u8 hidden_1621_id; + + 
pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); + switch (hidden_1621_id) { + case 0x31: + agp_bridge_info[i].chipset_name="M1631"; + break; + case 0x32: + agp_bridge_info[i].chipset_name="M1632"; + break; + case 0x41: + agp_bridge_info[i].chipset_name="M1641"; + break; + case 0x43: + break; + case 0x47: + agp_bridge_info[i].chipset_name="M1647"; + break; + case 0x51: + agp_bridge_info[i].chipset_name="M1651"; + break; + default: + break; + } + } +#endif + + printk (KERN_INFO PFX "Detected %s %s chipset\n", + agp_bridge_info[i].vendor_name, + agp_bridge_info[i].chipset_name); + agp_bridge.type = agp_bridge_info[i].chipset; + return agp_bridge_info[i].chipset_setup (pdev); + } + + i++; + } + + i--; /* point to vendor generic entry (device_id == 0) */ + + /* try init anyway, if user requests it AND + * there is a 'generic' bridge entry for this vendor */ + if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) { + printk(KERN_WARNING PFX "Trying generic %s routines" + " for device id: %04x\n", + agp_bridge_info[i].vendor_name, pdev->device); + agp_bridge.type = agp_bridge_info[i].chipset; + return agp_bridge_info[i].chipset_setup (pdev); + } + + printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x)," + " you might want to try agp_try_unsupported=1.\n", + agp_bridge_info[i].vendor_name, pdev->device); + return -ENODEV; +} + + +/* Supported Device Scanning routine */ + +static int __init agp_find_supported_device(struct pci_dev *dev) +{ + u8 cap_ptr = 0x00; + + agp_bridge.dev = dev; + + /* Need to test for I810 here */ +#ifdef CONFIG_AGP_I810 + if (dev->vendor == PCI_VENDOR_ID_INTEL) { + struct pci_dev *i810_dev; + + switch (dev->device) { + case PCI_DEVICE_ID_INTEL_810_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_1, + NULL); + if (i810_dev == NULL) { + printk(KERN_ERR PFX "Detected an Intel i810," + " but could not find the secondary" + " device.\n"); + return -ENODEV; + } + printk(KERN_INFO PFX "Detected an Intel " 
+ "i810 Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i810_setup (i810_dev); + + case PCI_DEVICE_ID_INTEL_810_DC100_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_DC100_1, + NULL); + if (i810_dev == NULL) { + printk(KERN_ERR PFX "Detected an Intel i810 " + "DC100, but could not find the " + "secondary device.\n"); + return -ENODEV; + } + printk(KERN_INFO PFX "Detected an Intel i810 " + "DC100 Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i810_setup(i810_dev); + + case PCI_DEVICE_ID_INTEL_810_E_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_810_E_1, + NULL); + if (i810_dev == NULL) { + printk(KERN_ERR PFX "Detected an Intel i810 E" + ", but could not find the secondary " + "device.\n"); + return -ENODEV; + } + printk(KERN_INFO PFX "Detected an Intel i810 E " + "Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i810_setup(i810_dev); + + case PCI_DEVICE_ID_INTEL_815_0: + /* The i815 can operate either as an i810 style + * integrated device, or as an AGP4X motherboard. + * + * This only addresses the first mode: + */ + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_815_1, + NULL); + if (i810_dev == NULL) { + printk(KERN_ERR PFX "agpgart: Detected an " + "Intel i815, but could not find the" + " secondary device. Assuming a " + "non-integrated video card.\n"); + break; + } + printk(KERN_INFO PFX "agpgart: Detected an Intel i815 " + "Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i810_setup(i810_dev); + + case PCI_DEVICE_ID_INTEL_845_G_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_845_G_1, NULL); + if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_845_G_1, i810_dev); + } + + if (i810_dev == NULL) { + /* + * We probably have a I845MP chipset + * with an external graphics + * card. 
It will be initialized later + */ + agp_bridge.type = INTEL_I845_G; + break; + } + printk(KERN_INFO PFX "Detected an Intel " + "845G Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i830_setup(i810_dev); + + case PCI_DEVICE_ID_INTEL_830_M_0: + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_830_M_1, + NULL); + if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { + i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_830_M_1, + i810_dev); + } + + if (i810_dev == NULL) { + /* Intel 830MP with external graphic card */ + /* It will be initialized later */ + agp_bridge.type = INTEL_I830_M; + break; + } + printk(KERN_INFO PFX "Detected an Intel " + "830M Chipset.\n"); + agp_bridge.type = INTEL_I810; + return intel_i830_setup(i810_dev); + default: + break; + } + } +#endif /* CONFIG_AGP_I810 */ + +#ifdef CONFIG_AGP_SWORKS + /* Everything is on func 1 here so we are hardcoding function one */ + if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) { + struct pci_dev *bridge_dev; + + bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, + PCI_DEVFN(0, 1)); + if(bridge_dev == NULL) { + printk(KERN_INFO PFX "agpgart: Detected a Serverworks " + "Chipset, but could not find the secondary " + "device.\n"); + return -ENODEV; + } + + switch (dev->device) { + case PCI_DEVICE_ID_SERVERWORKS_HE: + agp_bridge.type = SVWRKS_HE; + return serverworks_setup(bridge_dev); + + case PCI_DEVICE_ID_SERVERWORKS_LE: + case 0x0007: + agp_bridge.type = SVWRKS_LE; + return serverworks_setup(bridge_dev); + + default: + if(agp_try_unsupported) { + agp_bridge.type = SVWRKS_GENERIC; + return serverworks_setup(bridge_dev); + } + break; + } + } + +#endif /* CONFIG_AGP_SWORKS */ + +#ifdef CONFIG_AGP_HP_ZX1 + if (dev->vendor == PCI_VENDOR_ID_HP) { + /* ZX1 LBAs can be either PCI or AGP bridges */ + if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { + printk(KERN_INFO PFX "Detected HP ZX1 AGP " + "chipset at %s\n", dev->slot_name); + agp_bridge.type = HP_ZX1; + 
agp_bridge.dev = dev; + return hp_zx1_setup(dev); + } + return -ENODEV; + } +#endif /* CONFIG_AGP_HP_ZX1 */ + + /* find capndx */ + cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); + if (cap_ptr == 0x00) + return -ENODEV; + agp_bridge.capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + 4, + &agp_bridge.mode); + + /* probe for known chipsets */ + return agp_lookup_host_bridge (dev); +} + +struct agp_max_table { + int mem; + int agp; +}; + +static struct agp_max_table maxes_table[9] __initdata = +{ + {0, 0}, + {32, 4}, + {64, 28}, + {128, 96}, + {256, 204}, + {512, 440}, + {1024, 942}, + {2048, 1920}, + {4096, 3932} +}; + +static int __init agp_find_max (void) +{ + long memory, index, result; + + memory = virt_to_phys(high_memory) >> 20; + index = 1; + + while ((memory > maxes_table[index].mem) && + (index < 8)) { + index++; + } + + result = maxes_table[index - 1].agp + + ( (memory - maxes_table[index - 1].mem) * + (maxes_table[index].agp - maxes_table[index - 1].agp)) / + (maxes_table[index].mem - maxes_table[index - 1].mem); + + printk(KERN_INFO PFX "Maximum main memory to use " + "for agp memory: %ldM\n", result); + result = result << (20 - PAGE_SHIFT); + return result; +} + +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 99 + +static struct agp_version agp_current_version = +{ + major: AGPGART_VERSION_MAJOR, + minor: AGPGART_VERSION_MINOR, +}; + +static int __init agp_backend_initialize(struct pci_dev *dev) +{ + int size_value, rc, got_gatt=0, got_keylist=0; + + memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); + agp_bridge.type = NOT_SUPPORTED; + agp_bridge.max_memory_agp = agp_find_max(); + agp_bridge.version = &agp_current_version; + + rc = agp_find_supported_device(dev); + if (rc) { + /* not KERN_ERR because error msg should have already printed */ + printk(KERN_DEBUG PFX "no supported devices found.\n"); + return rc; + } + + if (agp_bridge.needs_scratch_page == TRUE) { + 
void *addr; + addr = agp_bridge.agp_alloc_page(); + + if (addr == NULL) { + printk(KERN_ERR PFX "unable to get memory for " + "scratch page.\n"); + return -ENOMEM; + } + agp_bridge.scratch_page = virt_to_phys(addr); + agp_bridge.scratch_page = + agp_bridge.mask_memory(agp_bridge.scratch_page, 0); + } + + size_value = agp_bridge.fetch_size(); + + if (size_value == 0) { + printk(KERN_ERR PFX "unable to determine aperture size.\n"); + rc = -EINVAL; + goto err_out; + } + if (agp_bridge.create_gatt_table()) { + printk(KERN_ERR PFX "unable to get memory for graphics " + "translation table.\n"); + rc = -ENOMEM; + goto err_out; + } + got_gatt = 1; + + agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); + if (agp_bridge.key_list == NULL) { + printk(KERN_ERR PFX "error allocating memory for key lists.\n"); + rc = -ENOMEM; + goto err_out; + } + got_keylist = 1; + + /* FIXME vmalloc'd memory not guaranteed contiguous */ + memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); + + if (agp_bridge.configure()) { + printk(KERN_ERR PFX "error configuring host chipset.\n"); + rc = -EINVAL; + goto err_out; + } + + printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", + size_value, agp_bridge.gart_bus_addr); + + return 0; + +err_out: + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page &= ~(0x00000fff); + agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); + } + if (got_gatt) + agp_bridge.free_gatt_table(); + if (got_keylist) + vfree(agp_bridge.key_list); + return rc; +} + + +/* cannot be __exit b/c as it could be called from __init code */ +static void agp_backend_cleanup(void) +{ + agp_bridge.cleanup(); + agp_bridge.free_gatt_table(); + vfree(agp_bridge.key_list); + + if (agp_bridge.needs_scratch_page == TRUE) { + agp_bridge.scratch_page &= ~(0x00000fff); + agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); + } +} + +static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data) +{ + switch(rq) + { + case PM_SUSPEND: + return 
agp_bridge.suspend(); + case PM_RESUME: + agp_bridge.resume(); + return 0; + } + return 0; +} + +extern int agp_frontend_initialize(void); +extern void agp_frontend_cleanup(void); + +static const drm_agp_t drm_agp = { + &agp_free_memory, + &agp_allocate_memory, + &agp_bind_memory, + &agp_unbind_memory, + &agp_enable, + &agp_backend_acquire, + &agp_backend_release, + &agp_copy_info +}; + +static int agp_probe (struct pci_dev *dev, const struct pci_device_id *ent) +{ + int ret_val; + + if (agp_bridge.type != NOT_SUPPORTED) { + printk (KERN_DEBUG PFX "Oops, don't init more than one agpgart device.\n"); + return -ENODEV; + } + + ret_val = agp_backend_initialize(dev); + if (ret_val) { + agp_bridge.type = NOT_SUPPORTED; + return ret_val; + } + ret_val = agp_frontend_initialize(); + if (ret_val) { + agp_bridge.type = NOT_SUPPORTED; + agp_backend_cleanup(); + return ret_val; + } + + inter_module_register("drm_agp", THIS_MODULE, &drm_agp); + + pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power); + return 0; +} + +static struct pci_device_id agp_pci_table[] __initdata = { + { + class: (PCI_CLASS_BRIDGE_HOST << 8), + class_mask: ~0, + vendor: PCI_ANY_ID, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_pci_table); + +static struct pci_driver agp_pci_driver = { + name: "agpgart", + id_table: agp_pci_table, + probe: agp_probe, +}; + +static int __init agp_init(void) +{ + int ret_val; + + printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", + AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); + + ret_val = pci_module_init(&agp_pci_driver); + if (ret_val) { + agp_bridge.type = NOT_SUPPORTED; + return ret_val; + } + return 0; +} + +static void __exit agp_cleanup(void) +{ + pci_unregister_driver(&agp_pci_driver); + if (agp_bridge.type != NOT_SUPPORTED) { + pm_unregister_all(agp_power); + agp_frontend_cleanup(); + agp_backend_cleanup(); + inter_module_unregister("drm_agp"); + } +} + 
+module_init(agp_init); +module_exit(agp_cleanup); diff -Nru a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h --- a/drivers/char/agp/agp.h Sat Jul 20 12:12:35 2002 +++ b/drivers/char/agp/agp.h Sat Jul 20 12:12:35 2002 @@ -27,6 +27,67 @@ #ifndef _AGP_BACKEND_PRIV_H #define _AGP_BACKEND_PRIV_H 1 +#include /* for flush_agp_cache() */ + +extern struct agp_bridge_data agp_bridge; + +/* Generic routines. */ +void agp_generic_agp_enable(u32 mode); +int agp_generic_create_gatt_table(void); +int agp_generic_free_gatt_table(void); +agp_memory *agp_create_memory(int scratch_pages); +int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type); +int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, int type); +agp_memory *agp_generic_alloc_by_type(size_t page_count, int type); +void agp_generic_free_by_type(agp_memory * curr); +void *agp_generic_alloc_page(void); +void agp_generic_destroy_page(void *addr); +int agp_generic_suspend(void); +void agp_generic_resume(void); +void agp_free_key(int key); + +/* chipset specific init routines. 
*/ +int __init ali_generic_setup (struct pci_dev *pdev); +int __init amd_irongate_setup (struct pci_dev *pdev); +int __init hp_zx1_setup (struct pci_dev *pdev); +int __init intel_i460_setup (struct pci_dev *pdev); +int __init intel_generic_setup (struct pci_dev *pdev); +int __init intel_i810_setup(struct pci_dev *i810_dev); +int __init intel_815_setup(struct pci_dev *pdev); +int __init intel_i830_setup(struct pci_dev *i830_dev); +int __init intel_820_setup (struct pci_dev *pdev); +int __init intel_830mp_setup (struct pci_dev *pdev); +int __init intel_840_setup (struct pci_dev *pdev); +int __init intel_845_setup (struct pci_dev *pdev); +int __init intel_850_setup (struct pci_dev *pdev); +int __init intel_860_setup (struct pci_dev *pdev); +int __init serverworks_setup (struct pci_dev *pdev); +int __init sis_generic_setup (struct pci_dev *pdev); +int __init via_generic_setup (struct pci_dev *pdev); + +#define AGPGART_MODULE_NAME "agpgart" +#define PFX AGPGART_MODULE_NAME ": " + + +#ifdef CONFIG_SMP +static void ipi_handler(void *null) +{ + flush_agp_cache(); +} + +static void __attribute__((unused)) global_cache_flush(void) +{ + if (smp_call_function(ipi_handler, NULL, 1, 1) != 0) + panic(PFX "timed out waiting for the other CPUs!\n"); + flush_agp_cache(); +} +#else +static void global_cache_flush(void) +{ + flush_agp_cache(); +} +#endif /* !CONFIG_SMP */ + enum aper_size_type { U8_APER_SIZE, U16_APER_SIZE, @@ -35,55 +96,55 @@ FIXED_APER_SIZE }; -typedef struct _gatt_mask { +struct gatt_mask { unsigned long mask; u32 type; /* totally device specific, for integrated chipsets that * might have different types of memory masks. 
For other * devices this will probably be ignored */ -} gatt_mask; +}; -typedef struct _aper_size_info_8 { +struct aper_size_info_8 { int size; int num_entries; int page_order; u8 size_value; -} aper_size_info_8; +}; -typedef struct _aper_size_info_16 { +struct aper_size_info_16 { int size; int num_entries; int page_order; u16 size_value; -} aper_size_info_16; +}; -typedef struct _aper_size_info_32 { +struct aper_size_info_32 { int size; int num_entries; int page_order; u32 size_value; -} aper_size_info_32; +}; -typedef struct _aper_size_info_lvl2 { +struct aper_size_info_lvl2 { int size; int num_entries; u32 size_value; -} aper_size_info_lvl2; +}; -typedef struct _aper_size_info_fixed { +struct aper_size_info_fixed { int size; int num_entries; int page_order; -} aper_size_info_fixed; +}; struct agp_bridge_data { - agp_version *version; + struct agp_version *version; void *aperture_sizes; void *previous_size; void *current_size; void *dev_private_data; struct pci_dev *dev; - gatt_mask *masks; + struct gatt_mask *masks; unsigned long *gatt_table; unsigned long *gatt_table_real; unsigned long scratch_page; @@ -125,26 +186,26 @@ }; -#define OUTREG64(mmap, addr, val) __raw_writeq((val), (mmap)+(addr)) -#define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr)) -#define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr)) -#define OUTREG8(mmap, addr, val) __raw_writeb((val), (mmap)+(addr)) - -#define INREG64(mmap, addr) __raw_readq((mmap)+(addr)) -#define INREG32(mmap, addr) __raw_readl((mmap)+(addr)) -#define INREG16(mmap, addr) __raw_readw((mmap)+(addr)) -#define INREG8(mmap, addr) __raw_readb((mmap)+(addr)) - -#define KB(x) ((x) * 1024) -#define MB(x) (KB (KB (x))) -#define GB(x) (MB (KB (x))) +#define OUTREG64(mmap, addr, val) __raw_writeq((val), (mmap)+(addr)) +#define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr)) +#define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr)) +#define OUTREG8(mmap, addr, val) 
__raw_writeb((val), (mmap)+(addr)) + +#define INREG64(mmap, addr) __raw_readq((mmap)+(addr)) +#define INREG32(mmap, addr) __raw_readl((mmap)+(addr)) +#define INREG16(mmap, addr) __raw_readw((mmap)+(addr)) +#define INREG8(mmap, addr) __raw_readb((mmap)+(addr)) + +#define KB(x) ((x) * 1024) +#define MB(x) (KB (KB (x))) +#define GB(x) (MB (KB (x))) #define CACHE_FLUSH agp_bridge.cache_flush -#define A_SIZE_8(x) ((aper_size_info_8 *) x) -#define A_SIZE_16(x) ((aper_size_info_16 *) x) -#define A_SIZE_32(x) ((aper_size_info_32 *) x) -#define A_SIZE_LVL2(x) ((aper_size_info_lvl2 *) x) -#define A_SIZE_FIX(x) ((aper_size_info_fixed *) x) +#define A_SIZE_8(x) ((struct aper_size_info_8 *) x) +#define A_SIZE_16(x) ((struct aper_size_info_16 *) x) +#define A_SIZE_32(x) ((struct aper_size_info_32 *) x) +#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x) +#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x) #define A_IDX8() (A_SIZE_8(agp_bridge.aperture_sizes) + i) #define A_IDX16() (A_SIZE_16(agp_bridge.aperture_sizes) + i) #define A_IDX32() (A_SIZE_32(agp_bridge.aperture_sizes) + i) @@ -152,91 +213,88 @@ #define A_IDXFIX() (A_SIZE_FIX(agp_bridge.aperture_sizes) + i) #define MAXKEY (4096 * 32) -#define AGPGART_MODULE_NAME "agpgart" -#define PFX AGPGART_MODULE_NAME ": " - -#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) +#define PGE_EMPTY(p) (!(p) || (p) == (unsigned long) agp_bridge.scratch_page) #ifndef PCI_DEVICE_ID_VIA_82C691_0 -#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 #endif #ifndef PCI_DEVICE_ID_VIA_8371_0 -#define PCI_DEVICE_ID_VIA_8371_0 0x0391 +#define PCI_DEVICE_ID_VIA_8371_0 0x0391 #endif #ifndef PCI_DEVICE_ID_VIA_8363_0 -#define PCI_DEVICE_ID_VIA_8363_0 0x0305 +#define PCI_DEVICE_ID_VIA_8363_0 0x0305 #endif #ifndef PCI_DEVICE_ID_VIA_82C694X_0 -#define PCI_DEVICE_ID_VIA_82C694X_0 0x0605 +#define PCI_DEVICE_ID_VIA_82C694X_0 0x0605 #endif #ifndef PCI_DEVICE_ID_INTEL_810_0 -#define 
PCI_DEVICE_ID_INTEL_810_0 0x7120 +#define PCI_DEVICE_ID_INTEL_810_0 0x7120 #endif #ifndef PCI_DEVICE_ID_INTEL_845_G_0 #define PCI_DEVICE_ID_INTEL_845_G_0 0x2560 #endif #ifndef PCI_DEVICE_ID_INTEL_845_G_1 -#define PCI_DEVICE_ID_INTEL_845_G_1 0x2562 +#define PCI_DEVICE_ID_INTEL_845_G_1 0x2562 #endif #ifndef PCI_DEVICE_ID_INTEL_830_M_0 #define PCI_DEVICE_ID_INTEL_830_M_0 0x3575 #endif #ifndef PCI_DEVICE_ID_INTEL_830_M_1 -#define PCI_DEVICE_ID_INTEL_830_M_1 0x3577 +#define PCI_DEVICE_ID_INTEL_830_M_1 0x3577 #endif #ifndef PCI_DEVICE_ID_INTEL_820_0 -#define PCI_DEVICE_ID_INTEL_820_0 0x2500 +#define PCI_DEVICE_ID_INTEL_820_0 0x2500 #endif #ifndef PCI_DEVICE_ID_INTEL_820_UP_0 -#define PCI_DEVICE_ID_INTEL_820_UP_0 0x2501 +#define PCI_DEVICE_ID_INTEL_820_UP_0 0x2501 #endif #ifndef PCI_DEVICE_ID_INTEL_840_0 -#define PCI_DEVICE_ID_INTEL_840_0 0x1a21 +#define PCI_DEVICE_ID_INTEL_840_0 0x1a21 #endif #ifndef PCI_DEVICE_ID_INTEL_845_0 -#define PCI_DEVICE_ID_INTEL_845_0 0x1a30 +#define PCI_DEVICE_ID_INTEL_845_0 0x1a30 #endif #ifndef PCI_DEVICE_ID_INTEL_850_0 -#define PCI_DEVICE_ID_INTEL_850_0 0x2530 +#define PCI_DEVICE_ID_INTEL_850_0 0x2530 #endif #ifndef PCI_DEVICE_ID_INTEL_860_0 -#define PCI_DEVICE_ID_INTEL_860_0 0x2531 +#define PCI_DEVICE_ID_INTEL_860_0 0x2531 #endif #ifndef PCI_DEVICE_ID_INTEL_810_DC100_0 -#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122 +#define PCI_DEVICE_ID_INTEL_810_DC100_0 0x7122 #endif #ifndef PCI_DEVICE_ID_INTEL_810_E_0 -#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124 +#define PCI_DEVICE_ID_INTEL_810_E_0 0x7124 #endif #ifndef PCI_DEVICE_ID_INTEL_82443GX_0 -#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 #endif #ifndef PCI_DEVICE_ID_INTEL_810_1 -#define PCI_DEVICE_ID_INTEL_810_1 0x7121 +#define PCI_DEVICE_ID_INTEL_810_1 0x7121 #endif #ifndef PCI_DEVICE_ID_INTEL_810_DC100_1 -#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123 +#define PCI_DEVICE_ID_INTEL_810_DC100_1 0x7123 #endif #ifndef PCI_DEVICE_ID_INTEL_810_E_1 -#define 
PCI_DEVICE_ID_INTEL_810_E_1 0x7125 +#define PCI_DEVICE_ID_INTEL_810_E_1 0x7125 #endif #ifndef PCI_DEVICE_ID_INTEL_815_0 -#define PCI_DEVICE_ID_INTEL_815_0 0x1130 +#define PCI_DEVICE_ID_INTEL_815_0 0x1130 #endif #ifndef PCI_DEVICE_ID_INTEL_815_1 -#define PCI_DEVICE_ID_INTEL_815_1 0x1132 +#define PCI_DEVICE_ID_INTEL_815_1 0x1132 #endif #ifndef PCI_DEVICE_ID_INTEL_82443GX_1 -#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 +#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71a1 #endif #ifndef PCI_DEVICE_ID_INTEL_460GX -#define PCI_DEVICE_ID_INTEL_460GX 0x84ea +#define PCI_DEVICE_ID_INTEL_460GX 0x84ea #endif #ifndef PCI_DEVICE_ID_AMD_IRONGATE_0 -#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 +#define PCI_DEVICE_ID_AMD_IRONGATE_0 0x7006 #endif #ifndef PCI_DEVICE_ID_AMD_761_0 -#define PCI_DEVICE_ID_AMD_761_0 0x700e +#define PCI_DEVICE_ID_AMD_761_0 0x700e #endif #ifndef PCI_DEVICE_ID_AMD_762_0 #define PCI_DEVICE_ID_AMD_762_0 0x700C @@ -270,12 +328,12 @@ #endif /* intel register */ -#define INTEL_APBASE 0x10 -#define INTEL_APSIZE 0xb4 -#define INTEL_ATTBASE 0xb8 -#define INTEL_AGPCTRL 0xb0 -#define INTEL_NBXCFG 0x50 -#define INTEL_ERRSTS 0x91 +#define INTEL_APBASE 0x10 +#define INTEL_APSIZE 0xb4 +#define INTEL_ATTBASE 0xb8 +#define INTEL_AGPCTRL 0xb0 +#define INTEL_NBXCFG 0x50 +#define INTEL_ERRSTS 0x91 /* Intel 460GX Registers */ #define INTEL_I460_APBASE 0x10 @@ -287,116 +345,120 @@ #define INTEL_I460_GATT_COHERENT (1UL << 25) /* intel i830 registers */ -#define I830_GMCH_CTRL 0x52 -#define I830_GMCH_ENABLED 0x4 -#define I830_GMCH_MEM_MASK 0x1 -#define I830_GMCH_MEM_64M 0x1 -#define I830_GMCH_MEM_128M 0 -#define I830_GMCH_GMS_MASK 0x70 -#define I830_GMCH_GMS_DISABLED 0x00 -#define I830_GMCH_GMS_LOCAL 0x10 -#define I830_GMCH_GMS_STOLEN_512 0x20 -#define I830_GMCH_GMS_STOLEN_1024 0x30 -#define I830_GMCH_GMS_STOLEN_8192 0x40 -#define I830_RDRAM_CHANNEL_TYPE 0x03010 -#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) -#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) +#define I830_GMCH_CTRL 0x52 
+#define I830_GMCH_ENABLED 0x4 +#define I830_GMCH_MEM_MASK 0x1 +#define I830_GMCH_MEM_64M 0x1 +#define I830_GMCH_MEM_128M 0 +#define I830_GMCH_GMS_MASK 0x70 +#define I830_GMCH_GMS_DISABLED 0x00 +#define I830_GMCH_GMS_LOCAL 0x10 +#define I830_GMCH_GMS_STOLEN_512 0x20 +#define I830_GMCH_GMS_STOLEN_1024 0x30 +#define I830_GMCH_GMS_STOLEN_8192 0x40 +#define I830_RDRAM_CHANNEL_TYPE 0x03010 +#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5) +#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3) /* This one is for I830MP w. an external graphic card */ -#define INTEL_I830_ERRSTS 0x92 +#define INTEL_I830_ERRSTS 0x92 + +/* intel 815 register */ +#define INTEL_815_APCONT 0x51 +#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF /* intel i820 registers */ -#define INTEL_I820_RDCR 0x51 -#define INTEL_I820_ERRSTS 0xc8 +#define INTEL_I820_RDCR 0x51 +#define INTEL_I820_ERRSTS 0xc8 /* intel i840 registers */ -#define INTEL_I840_MCHCFG 0x50 -#define INTEL_I840_ERRSTS 0xc8 +#define INTEL_I840_MCHCFG 0x50 +#define INTEL_I840_ERRSTS 0xc8 /* intel i845 registers */ -#define INTEL_I845_AGPM 0x51 -#define INTEL_I845_ERRSTS 0xc8 +#define INTEL_I845_AGPM 0x51 +#define INTEL_I845_ERRSTS 0xc8 /* intel i850 registers */ -#define INTEL_I850_MCHCFG 0x50 -#define INTEL_I850_ERRSTS 0xc8 +#define INTEL_I850_MCHCFG 0x50 +#define INTEL_I850_ERRSTS 0xc8 /* intel i860 registers */ #define INTEL_I860_MCHCFG 0x50 #define INTEL_I860_ERRSTS 0xc8 /* intel i810 registers */ -#define I810_GMADDR 0x10 -#define I810_MMADDR 0x14 -#define I810_PTE_BASE 0x10000 -#define I810_PTE_MAIN_UNCACHED 0x00000000 -#define I810_PTE_LOCAL 0x00000002 -#define I810_PTE_VALID 0x00000001 -#define I810_SMRAM_MISCC 0x70 -#define I810_GFX_MEM_WIN_SIZE 0x00010000 -#define I810_GFX_MEM_WIN_32M 0x00010000 -#define I810_GMS 0x000000c0 -#define I810_GMS_DISABLE 0x00000000 -#define I810_PGETBL_CTL 0x2020 -#define I810_PGETBL_ENABLED 0x00000001 -#define I810_DRAM_CTL 0x3000 -#define I810_DRAM_ROW_0 0x00000001 -#define I810_DRAM_ROW_0_SDRAM 0x00000001 +#define 
I810_GMADDR 0x10 +#define I810_MMADDR 0x14 +#define I810_PTE_BASE 0x10000 +#define I810_PTE_MAIN_UNCACHED 0x00000000 +#define I810_PTE_LOCAL 0x00000002 +#define I810_PTE_VALID 0x00000001 +#define I810_SMRAM_MISCC 0x70 +#define I810_GFX_MEM_WIN_SIZE 0x00010000 +#define I810_GFX_MEM_WIN_32M 0x00010000 +#define I810_GMS 0x000000c0 +#define I810_GMS_DISABLE 0x00000000 +#define I810_PGETBL_CTL 0x2020 +#define I810_PGETBL_ENABLED 0x00000001 +#define I810_DRAM_CTL 0x3000 +#define I810_DRAM_ROW_0 0x00000001 +#define I810_DRAM_ROW_0_SDRAM 0x00000001 /* VIA register */ -#define VIA_APBASE 0x10 -#define VIA_GARTCTRL 0x80 -#define VIA_APSIZE 0x84 -#define VIA_ATTBASE 0x88 +#define VIA_APBASE 0x10 +#define VIA_GARTCTRL 0x80 +#define VIA_APSIZE 0x84 +#define VIA_ATTBASE 0x88 /* SiS registers */ -#define SIS_APBASE 0x10 -#define SIS_ATTBASE 0x90 -#define SIS_APSIZE 0x94 -#define SIS_TLBCNTRL 0x97 -#define SIS_TLBFLUSH 0x98 +#define SIS_APBASE 0x10 +#define SIS_ATTBASE 0x90 +#define SIS_APSIZE 0x94 +#define SIS_TLBCNTRL 0x97 +#define SIS_TLBFLUSH 0x98 /* AMD registers */ -#define AMD_APBASE 0x10 -#define AMD_MMBASE 0x14 -#define AMD_APSIZE 0xac -#define AMD_MODECNTL 0xb0 -#define AMD_MODECNTL2 0xb2 -#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ -#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ -#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ -#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ +#define AMD_APBASE 0x10 +#define AMD_MMBASE 0x14 +#define AMD_APSIZE 0xac +#define AMD_MODECNTL 0xb0 +#define AMD_MODECNTL2 0xb2 +#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */ +#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */ +#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */ +#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */ /* ALi registers */ -#define ALI_APBASE 0x10 -#define ALI_AGPCTRL 0xb8 -#define ALI_ATTBASE 0xbc -#define ALI_TLBCTRL 0xc0 -#define 
ALI_TAGCTRL 0xc4 -#define ALI_CACHE_FLUSH_CTRL 0xD0 +#define ALI_APBASE 0x10 +#define ALI_AGPCTRL 0xb8 +#define ALI_ATTBASE 0xbc +#define ALI_TLBCTRL 0xc0 +#define ALI_TAGCTRL 0xc4 +#define ALI_CACHE_FLUSH_CTRL 0xD0 #define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000 -#define ALI_CACHE_FLUSH_EN 0x100 +#define ALI_CACHE_FLUSH_EN 0x100 /* Serverworks Registers */ -#define SVWRKS_APSIZE 0x10 -#define SVWRKS_SIZE_MASK 0xfe000000 +#define SVWRKS_APSIZE 0x10 +#define SVWRKS_SIZE_MASK 0xfe000000 -#define SVWRKS_MMBASE 0x14 -#define SVWRKS_CACHING 0x4b -#define SVWRKS_FEATURE 0x68 +#define SVWRKS_MMBASE 0x14 +#define SVWRKS_CACHING 0x4b +#define SVWRKS_FEATURE 0x68 /* func 1 registers */ -#define SVWRKS_AGP_ENABLE 0x60 -#define SVWRKS_COMMAND 0x04 +#define SVWRKS_AGP_ENABLE 0x60 +#define SVWRKS_COMMAND 0x04 /* Memory mapped registers */ -#define SVWRKS_GART_CACHE 0x02 -#define SVWRKS_GATTBASE 0x04 -#define SVWRKS_TLBFLUSH 0x10 -#define SVWRKS_POSTFLUSH 0x14 -#define SVWRKS_DIRFLUSH 0x0c +#define SVWRKS_GART_CACHE 0x02 +#define SVWRKS_GATTBASE 0x04 +#define SVWRKS_TLBFLUSH 0x10 +#define SVWRKS_POSTFLUSH 0x14 +#define SVWRKS_DIRFLUSH 0x0c /* HP ZX1 SBA registers */ #define HP_ZX1_CTRL 0x200 diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Sat Jul 20 12:12:34 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,5114 +0,0 @@ -/* - * AGPGART module version 0.99 - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * TODO: - * - Allocate more than order 0 pages to avoid too much linear map splitting. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "agp.h" - -MODULE_AUTHOR("Jeff Hartmann "); -MODULE_PARM(agp_try_unsupported, "1i"); -MODULE_LICENSE("GPL and additional rights"); -EXPORT_SYMBOL(agp_free_memory); -EXPORT_SYMBOL(agp_allocate_memory); -EXPORT_SYMBOL(agp_copy_info); -EXPORT_SYMBOL(agp_bind_memory); -EXPORT_SYMBOL(agp_unbind_memory); -EXPORT_SYMBOL(agp_enable); -EXPORT_SYMBOL(agp_backend_acquire); -EXPORT_SYMBOL(agp_backend_release); - -static struct agp_bridge_data agp_bridge; -static int agp_try_unsupported __initdata = 0; - -#ifdef CONFIG_SMP -static void ipi_handler(void *null) -{ - flush_agp_cache(); -} - -static void smp_flush_cache(void) -{ - if (smp_call_function(ipi_handler, NULL, 1, 1) != 0) - panic(PFX "timed out waiting for the other CPUs!\n"); - flush_agp_cache(); -} -#define global_cache_flush smp_flush_cache -#else /* CONFIG_SMP */ -static void global_cache_flush(void) -{ - flush_agp_cache(); -} -#endif /* !CONFIG_SMP */ - -int agp_backend_acquire(void) -{ - if (agp_bridge.type == NOT_SUPPORTED) { - return -EINVAL; - } - atomic_inc(&agp_bridge.agp_in_use); - - if (atomic_read(&agp_bridge.agp_in_use) != 1) { - atomic_dec(&agp_bridge.agp_in_use); - return -EBUSY; - } - MOD_INC_USE_COUNT; - return 0; -} - -void agp_backend_release(void) -{ - if (agp_bridge.type == NOT_SUPPORTED) { - return; - } - atomic_dec(&agp_bridge.agp_in_use); - MOD_DEC_USE_COUNT; -} - -/* - * Generic routines for handling agp_memory structures - - * They use the basic page allocation routines to do the - * brunt of the work. 
- */ - - -static void agp_free_key(int key) -{ - - if (key < 0) { - return; - } - if (key < MAXKEY) { - clear_bit(key, agp_bridge.key_list); - } -} - -static int agp_get_key(void) -{ - int bit; - - bit = find_first_zero_bit(agp_bridge.key_list, MAXKEY); - if (bit < MAXKEY) { - set_bit(bit, agp_bridge.key_list); - return bit; - } - return -1; -} - -static agp_memory *agp_create_memory(int scratch_pages) -{ - agp_memory *new; - - new = kmalloc(sizeof(agp_memory), GFP_KERNEL); - - if (new == NULL) { - return NULL; - } - memset(new, 0, sizeof(agp_memory)); - new->key = agp_get_key(); - - if (new->key < 0) { - kfree(new); - return NULL; - } - new->memory = vmalloc(PAGE_SIZE * scratch_pages); - - if (new->memory == NULL) { - agp_free_key(new->key); - kfree(new); - return NULL; - } - new->num_scratch_pages = scratch_pages; - return new; -} - -void agp_free_memory(agp_memory * curr) -{ - int i; - - if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) { - return; - } - if (curr->is_bound == TRUE) { - agp_unbind_memory(curr); - } - if (curr->type != 0) { - agp_bridge.free_by_type(curr); - return; - } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - curr->memory[i] &= ~(0x00000fff); - agp_bridge.agp_destroy_page(phys_to_virt(curr->memory[i])); - } - } - agp_free_key(curr->key); - vfree(curr->memory); - kfree(curr); - MOD_DEC_USE_COUNT; -} - -#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) - -agp_memory *agp_allocate_memory(size_t page_count, u32 type) -{ - int scratch_pages; - agp_memory *new; - int i; - - if (agp_bridge.type == NOT_SUPPORTED) { - return NULL; - } - if ((atomic_read(&agp_bridge.current_memory_agp) + page_count) > - agp_bridge.max_memory_agp) { - return NULL; - } - - if (type != 0) { - new = agp_bridge.alloc_by_type(page_count, type); - return new; - } - /* We always increase the module count, since free auto-decrements - * it - */ - - MOD_INC_USE_COUNT; - - scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / 
ENTRIES_PER_PAGE; - - new = agp_create_memory(scratch_pages); - - if (new == NULL) { - MOD_DEC_USE_COUNT; - return NULL; - } - - for (i = 0; i < page_count; i++) { - void *addr = agp_bridge.agp_alloc_page(); - - if (addr == NULL) { - /* Free this structure */ - agp_free_memory(new); - return NULL; - } - new->memory[i] = - agp_bridge.mask_memory(virt_to_phys(addr), type); - new->page_count++; - } - - flush_agp_mappings(); - - return new; -} - -/* End - Generic routines for handling agp_memory structures */ - -static int agp_return_size(void) -{ - int current_size; - void *temp; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - current_size = A_SIZE_8(temp)->size; - break; - case U16_APER_SIZE: - current_size = A_SIZE_16(temp)->size; - break; - case U32_APER_SIZE: - current_size = A_SIZE_32(temp)->size; - break; - case LVL2_APER_SIZE: - current_size = A_SIZE_LVL2(temp)->size; - break; - case FIXED_APER_SIZE: - current_size = A_SIZE_FIX(temp)->size; - break; - default: - current_size = 0; - break; - } - - return current_size; -} - -/* Routine to copy over information structure */ - -void agp_copy_info(agp_kern_info * info) -{ - unsigned long page_mask = 0; - int i; - - memset(info, 0, sizeof(agp_kern_info)); - if (agp_bridge.type == NOT_SUPPORTED) { - info->chipset = agp_bridge.type; - return; - } - info->version.major = agp_bridge.version->major; - info->version.minor = agp_bridge.version->minor; - info->device = agp_bridge.dev; - info->chipset = agp_bridge.type; - info->mode = agp_bridge.mode; - info->aper_base = agp_bridge.gart_bus_addr; - info->aper_size = agp_return_size(); - info->max_memory = agp_bridge.max_memory_agp; - info->current_memory = atomic_read(&agp_bridge.current_memory_agp); - info->cant_use_aperture = agp_bridge.cant_use_aperture; - - for(i = 0; i < agp_bridge.num_of_masks; i++) - page_mask |= agp_bridge.mask_memory(page_mask, i); - - info->page_mask = ~page_mask; -} - -/* End - Routine to copy over 
information structure */ - -/* - * Routines for handling swapping of agp_memory into the GATT - - * These routines take agp_memory and insert them into the GATT. - * They call device specific routines to actually write to the GATT. - */ - -int agp_bind_memory(agp_memory * curr, off_t pg_start) -{ - int ret_val; - - if ((agp_bridge.type == NOT_SUPPORTED) || - (curr == NULL) || (curr->is_bound == TRUE)) { - return -EINVAL; - } - if (curr->is_flushed == FALSE) { - CACHE_FLUSH(); - curr->is_flushed = TRUE; - } - ret_val = agp_bridge.insert_memory(curr, pg_start, curr->type); - - if (ret_val != 0) { - return ret_val; - } - curr->is_bound = TRUE; - curr->pg_start = pg_start; - return 0; -} - -int agp_unbind_memory(agp_memory * curr) -{ - int ret_val; - - if ((agp_bridge.type == NOT_SUPPORTED) || (curr == NULL)) { - return -EINVAL; - } - if (curr->is_bound != TRUE) { - return -EINVAL; - } - ret_val = agp_bridge.remove_memory(curr, curr->pg_start, curr->type); - - if (ret_val != 0) { - return ret_val; - } - curr->is_bound = FALSE; - curr->pg_start = 0; - return 0; -} - -/* End - Routines for handling swapping of agp_memory into the GATT */ - -/* - * Driver routines - start - * Currently this module supports the following chipsets: - * i810, i815, 440lx, 440bx, 440gx, i830, i840, i845, i850, i860, via vp3, - * via mvp3, via kx133, via kt133, amd irongate, amd 761, amd 762, ALi M1541, - * and generic support for the SiS chipsets. - */ - -/* Generic Agp routines - Start */ - -static void agp_generic_agp_enable(u32 mode) -{ - struct pci_dev *device = NULL; - u32 command, scratch; - u8 cap_ptr; - - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + 4, - &command); - - /* - * PASS1: go throu all devices that claim to be - * AGP devices and collect their data. - */ - - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. 
Disable impossible - * settings, and adjust the readqueue to the minimum. - */ - - pci_read_config_dword(device, cap_ptr + 4, &scratch); - - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min_t(u32, (mode & 0xff000000), - min_t(u32, (command & 0xff000000), - (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && - (scratch & 0x00000200) && - (mode & 0x00000200))) - command &= ~0x00000200; - - /* disable FW if it's not supported */ - if (!((command & 0x00000010) && - (scratch & 0x00000010) && - (mode & 0x00000010))) - command &= ~0x00000010; - - if (!((command & 4) && - (scratch & 4) && - (mode & 4))) - command &= ~0x00000004; - - if (!((command & 2) && - (scratch & 2) && - (mode & 2))) - command &= ~0x00000002; - - if (!((command & 1) && - (scratch & 1) && - (mode & 1))) - command &= ~0x00000001; - } - } - /* - * PASS2: Figure out the 4X/2X/1X setting and enable the - * target (our motherboard chipset). - */ - - if (command & 4) { - command &= ~3; /* 4X */ - } - if (command & 2) { - command &= ~5; /* 2X */ - } - if (command & 1) { - command &= ~6; /* 1X */ - } - command |= 0x00000100; - - pci_write_config_dword(agp_bridge.dev, - agp_bridge.capndx + 8, - command); - - /* - * PASS3: Go throu all AGP devices and update the - * command registers. 
- */ - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) - pci_write_config_dword(device, cap_ptr + 8, command); - } -} - -static int agp_generic_create_gatt_table(void) -{ - char *table; - char *table_end; - int size; - int page_order; - int num_entries; - int i; - void *temp; - struct page *page; - - /* The generic routines can't handle 2 level gatt's */ - if (agp_bridge.size_type == LVL2_APER_SIZE) { - return -EINVAL; - } - - table = NULL; - i = agp_bridge.aperture_size_idx; - temp = agp_bridge.current_size; - size = page_order = num_entries = 0; - - if (agp_bridge.size_type != FIXED_APER_SIZE) { - do { - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - size = A_SIZE_8(temp)->size; - page_order = - A_SIZE_8(temp)->page_order; - num_entries = - A_SIZE_8(temp)->num_entries; - break; - case U16_APER_SIZE: - size = A_SIZE_16(temp)->size; - page_order = A_SIZE_16(temp)->page_order; - num_entries = A_SIZE_16(temp)->num_entries; - break; - case U32_APER_SIZE: - size = A_SIZE_32(temp)->size; - page_order = A_SIZE_32(temp)->page_order; - num_entries = A_SIZE_32(temp)->num_entries; - break; - /* This case will never really happen. */ - case FIXED_APER_SIZE: - case LVL2_APER_SIZE: - default: - size = page_order = num_entries = 0; - break; - } - - table = (char *) __get_free_pages(GFP_KERNEL, - page_order); - - if (table == NULL) { - i++; - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - agp_bridge.current_size = A_IDX8(); - break; - case U16_APER_SIZE: - agp_bridge.current_size = A_IDX16(); - break; - case U32_APER_SIZE: - agp_bridge.current_size = A_IDX32(); - break; - /* This case will never really - * happen. 
- */ - case FIXED_APER_SIZE: - case LVL2_APER_SIZE: - default: - agp_bridge.current_size = - agp_bridge.current_size; - break; - } - temp = agp_bridge.current_size; - } else { - agp_bridge.aperture_size_idx = i; - } - } while ((table == NULL) && - (i < agp_bridge.num_aperture_sizes)); - } else { - size = ((aper_size_info_fixed *) temp)->size; - page_order = ((aper_size_info_fixed *) temp)->page_order; - num_entries = ((aper_size_info_fixed *) temp)->num_entries; - table = (char *) __get_free_pages(GFP_KERNEL, page_order); - } - - if (table == NULL) { - return -ENOMEM; - } - table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - - for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) - SetPageReserved(page); - - agp_bridge.gatt_table_real = (unsigned long *) table; - CACHE_FLUSH(); - agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), - (PAGE_SIZE * (1 << page_order))); - CACHE_FLUSH(); - - if (agp_bridge.gatt_table == NULL) { - for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) - ClearPageReserved(page); - - free_pages((unsigned long) table, page_order); - - return -ENOMEM; - } - agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); - - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = - (unsigned long) agp_bridge.scratch_page; - } - - return 0; -} - -static int agp_generic_suspend(void) -{ - return 0; -} - -static void agp_generic_resume(void) -{ - return; -} - -static int agp_generic_free_gatt_table(void) -{ - int page_order; - char *table, *table_end; - void *temp; - struct page *page; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - page_order = A_SIZE_8(temp)->page_order; - break; - case U16_APER_SIZE: - page_order = A_SIZE_16(temp)->page_order; - break; - case U32_APER_SIZE: - page_order = A_SIZE_32(temp)->page_order; - break; - case FIXED_APER_SIZE: - page_order = A_SIZE_FIX(temp)->page_order; - break; - case LVL2_APER_SIZE: - 
/* The generic routines can't deal with 2 level gatt's */ - return -EINVAL; - break; - default: - page_order = 0; - break; - } - - /* Do not worry about freeing memory, because if this is - * called, then all agp memory is deallocated and removed - * from the table. - */ - - iounmap(agp_bridge.gatt_table); - table = (char *) agp_bridge.gatt_table_real; - table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); - - for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) - ClearPageReserved(page); - - free_pages((unsigned long) agp_bridge.gatt_table_real, page_order); - return 0; -} - -static int agp_generic_insert_memory(agp_memory * mem, - off_t pg_start, int type) -{ - int i, j, num_entries; - void *temp; - - temp = agp_bridge.current_size; - - switch (agp_bridge.size_type) { - case U8_APER_SIZE: - num_entries = A_SIZE_8(temp)->num_entries; - break; - case U16_APER_SIZE: - num_entries = A_SIZE_16(temp)->num_entries; - break; - case U32_APER_SIZE: - num_entries = A_SIZE_32(temp)->num_entries; - break; - case FIXED_APER_SIZE: - num_entries = A_SIZE_FIX(temp)->num_entries; - break; - case LVL2_APER_SIZE: - /* The generic routines can't deal with 2 level gatt's */ - return -EINVAL; - break; - default: - num_entries = 0; - break; - } - - if (type != 0 || mem->type != 0) { - /* The generic routines know nothing of memory types */ - return -EINVAL; - } - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - j = pg_start; - - while (j < (pg_start + mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge.gatt_table[j] = mem->memory[i]; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int agp_generic_remove_memory(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - - if (type != 0 || mem->type != 0) { - /* The generic 
routines know nothing of memory types */ - return -EINVAL; - } - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - agp_bridge.gatt_table[i] = - (unsigned long) agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) -{ - return NULL; -} - -static void agp_generic_free_by_type(agp_memory * curr) -{ - if (curr->memory != NULL) { - vfree(curr->memory); - } - agp_free_key(curr->key); - kfree(curr); -} - -/* - * Basic Page Allocation Routines - - * These routines handle page allocation - * and by default they reserve the allocated - * memory. They also handle incrementing the - * current_memory_agp value, Which is checked - * against a maximum value. - */ - -static void *agp_generic_alloc_page(void) -{ - struct page * page; - - page = alloc_page(GFP_KERNEL); - if (page == NULL) - return 0; - - map_page_into_agp(page); - - get_page(page); - SetPageLocked(page); - atomic_inc(&agp_bridge.current_memory_agp); - return page_address(page); -} - -static void agp_generic_destroy_page(void *addr) -{ - struct page *page; - - if (addr == NULL) - return; - - page = virt_to_page(addr); - unmap_page_from_agp(page); - put_page(page); - unlock_page(page); - free_page((unsigned long)addr); - atomic_dec(&agp_bridge.current_memory_agp); -} - -/* End Basic Page Allocation Routines */ - -void agp_enable(u32 mode) -{ - if (agp_bridge.type == NOT_SUPPORTED) return; - agp_bridge.agp_enable(mode); -} - -/* End - Generic Agp routines */ - -#ifdef CONFIG_AGP_I810 -static aper_size_info_fixed intel_i810_sizes[] = -{ - {64, 16384, 4}, - /* The 32M mode still requires a 64k gatt */ - {32, 8192, 4} -}; - -#define AGP_DCACHE_MEMORY 1 -#define AGP_PHYS_MEMORY 2 - -static gatt_mask intel_i810_masks[] = -{ - {I810_PTE_VALID, 0}, - {(I810_PTE_VALID | I810_PTE_LOCAL), AGP_DCACHE_MEMORY}, - {I810_PTE_VALID, 0} -}; - -static struct _intel_i810_private { - struct pci_dev *i810_dev; /* device one */ - 
volatile u8 *registers; - int num_dcache_entries; -} intel_i810_private; - -static int intel_i810_fetch_size(void) -{ - u32 smram_miscc; - aper_size_info_fixed *values; - - pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); - values = A_SIZE_FIX(agp_bridge.aperture_sizes); - - if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { - printk(KERN_WARNING PFX "i810 is disabled\n"); - return 0; - } - if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + 1); - agp_bridge.aperture_size_idx = 1; - return values[1].size; - } else { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values); - agp_bridge.aperture_size_idx = 0; - return values[0].size; - } - - return 0; -} - -static int intel_i810_configure(void) -{ - aper_size_info_fixed *current_size; - u32 temp; - int i; - - current_size = A_SIZE_FIX(agp_bridge.current_size); - - pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); - temp &= 0xfff80000; - - intel_i810_private.registers = - (volatile u8 *) ioremap(temp, 128 * 4096); - - if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) - & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { - /* This will need to be dynamically assigned */ - printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); - intel_i810_private.num_dcache_entries = 1024; - } - pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, - agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); - CACHE_FLUSH(); - - if (agp_bridge.needs_scratch_page == TRUE) { - for (i = 0; i < current_size->num_entries; i++) { - OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); - } - } - return 0; -} - -static void intel_i810_cleanup(void) -{ - OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0); 
- iounmap((void *) intel_i810_private.registers); -} - -static void intel_i810_tlbflush(agp_memory * mem) -{ - return; -} - -static void intel_i810_agp_enable(u32 mode) -{ - return; -} - -static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, - int type) -{ - int i, j, num_entries; - void *temp; - - temp = agp_bridge.current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - for (j = pg_start; j < (pg_start + mem->page_count); j++) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { - return -EBUSY; - } - } - - if (type != 0 || mem->type != 0) { - if ((type == AGP_DCACHE_MEMORY) && - (mem->type == AGP_DCACHE_MEMORY)) { - /* special insert */ - CACHE_FLUSH(); - for (i = pg_start; - i < (pg_start + mem->page_count); i++) { - OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (i * 4), - (i * 4096) | I810_PTE_LOCAL | - I810_PTE_VALID); - } - CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); - return 0; - } - if((type == AGP_PHYS_MEMORY) && - (mem->type == AGP_PHYS_MEMORY)) { - goto insert; - } - return -EINVAL; - } - -insert: - CACHE_FLUSH(); - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (j * 4), mem->memory[i]); - } - CACHE_FLUSH(); - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (i * 4), - agp_bridge.scratch_page); - } - - CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); - return 0; -} - -static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) -{ - agp_memory *new; - - if (type == AGP_DCACHE_MEMORY) { - if (pg_count != intel_i810_private.num_dcache_entries) { - return NULL; - } - new = agp_create_memory(1); - - if (new == NULL) { - return NULL; - } - new->type = 
AGP_DCACHE_MEMORY; - new->page_count = pg_count; - new->num_scratch_pages = 0; - vfree(new->memory); - MOD_INC_USE_COUNT; - return new; - } - if(type == AGP_PHYS_MEMORY) { - void *addr; - /* The I810 requires a physical address to program - * it's mouse pointer into hardware. However the - * Xserver still writes to it through the agp - * aperture - */ - if (pg_count != 1) { - return NULL; - } - new = agp_create_memory(1); - - if (new == NULL) { - return NULL; - } - MOD_INC_USE_COUNT; - addr = agp_bridge.agp_alloc_page(); - - if (addr == NULL) { - /* Free this structure */ - agp_free_memory(new); - return NULL; - } - new->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr), type); - new->page_count = 1; - new->num_scratch_pages = 1; - new->type = AGP_PHYS_MEMORY; - new->physical = virt_to_phys((void *) new->memory[0]); - return new; - } - - return NULL; -} - -static void intel_i810_free_by_type(agp_memory * curr) -{ - agp_free_key(curr->key); - if(curr->type == AGP_PHYS_MEMORY) { - agp_bridge.agp_destroy_page( - phys_to_virt(curr->memory[0])); - vfree(curr->memory); - } - kfree(curr); - MOD_DEC_USE_COUNT; -} - -static unsigned long intel_i810_mask_memory(unsigned long addr, int type) -{ - /* Type checking must be done elsewhere */ - return addr | agp_bridge.masks[type].mask; -} - -static int __init intel_i810_setup(struct pci_dev *i810_dev) -{ - intel_i810_private.i810_dev = i810_dev; - - agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 2; - agp_bridge.aperture_sizes = (void *) intel_i810_sizes; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - agp_bridge.dev_private_data = (void *) &intel_i810_private; - agp_bridge.needs_scratch_page = TRUE; - agp_bridge.configure = intel_i810_configure; - agp_bridge.fetch_size = intel_i810_fetch_size; - agp_bridge.cleanup = intel_i810_cleanup; - agp_bridge.tlb_flush = intel_i810_tlbflush; - agp_bridge.mask_memory = intel_i810_mask_memory; - agp_bridge.agp_enable = 
intel_i810_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = intel_i810_insert_entries; - agp_bridge.remove_memory = intel_i810_remove_entries; - agp_bridge.alloc_by_type = intel_i810_alloc_by_type; - agp_bridge.free_by_type = intel_i810_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; -} - -static aper_size_info_fixed intel_i830_sizes[] = -{ - {128, 32768, 5}, - /* The 64M mode still requires a 128k gatt */ - {64, 16384, 5} -}; - -static struct _intel_i830_private { - struct pci_dev *i830_dev; /* device one */ - volatile u8 *registers; - int gtt_entries; -} intel_i830_private; - -static void intel_i830_init_gtt_entries(void) { - u16 gmch_ctrl; - int gtt_entries; - u8 rdct; - static const int ddt[4] = { 0, 16, 32, 64 }; - - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); - - switch (gmch_ctrl & I830_GMCH_GMS_MASK) { - case I830_GMCH_GMS_STOLEN_512: - gtt_entries = KB(512); - printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); - break; - case I830_GMCH_GMS_STOLEN_1024: - gtt_entries = MB(1); - printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); - break; - case I830_GMCH_GMS_STOLEN_8192: - gtt_entries = MB(8); - printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); - break; - case I830_GMCH_GMS_LOCAL: - rdct = INREG8(intel_i830_private.registers,I830_RDRAM_CHANNEL_TYPE); - gtt_entries = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]); - printk(KERN_INFO PFX "detected %dK local memory.\n",gtt_entries / KB(1)); - break; - default: - printk(KERN_INFO PFX "no video memory detected.\n"); - gtt_entries = 0; - 
break; - } - - gtt_entries /= KB(4); - - intel_i830_private.gtt_entries = gtt_entries; -} - -/* The intel i830 automatically initializes the agp aperture during POST. - * Use the memory already set aside for in the GTT. - */ -static int intel_i830_create_gatt_table(void) -{ - int page_order; - aper_size_info_fixed *size; - int num_entries; - u32 temp; - - size = agp_bridge.current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge.gatt_table_real = 0; - - pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp); - temp &= 0xfff80000; - - intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096); - if (!intel_i830_private.registers) return (-ENOMEM); - - temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000; - CACHE_FLUSH(); - - /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); - - agp_bridge.gatt_table = NULL; - - agp_bridge.gatt_bus_addr = temp; - - return(0); -} - -/* Return the gatt table to a sane state. Use the top of stolen - * memory for the GTT. 
- */ -static int intel_i830_free_gatt_table(void) -{ - return(0); -} - -static int intel_i830_fetch_size(void) -{ - u16 gmch_ctrl; - aper_size_info_fixed *values; - - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); - values = A_SIZE_FIX(agp_bridge.aperture_sizes); - - if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { - agp_bridge.previous_size = agp_bridge.current_size = (void *) values; - agp_bridge.aperture_size_idx = 0; - return(values[0].size); - } else { - agp_bridge.previous_size = agp_bridge.current_size = (void *) values; - agp_bridge.aperture_size_idx = 1; - return(values[1].size); - } - - return(0); -} - -static int intel_i830_configure(void) -{ - aper_size_info_fixed *current_size; - u32 temp; - u16 gmch_ctrl; - int i; - - current_size = A_SIZE_FIX(agp_bridge.current_size); - - pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); - gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl); - - OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); - CACHE_FLUSH(); - - if (agp_bridge.needs_scratch_page == TRUE) - for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); - - return (0); -} - -static void intel_i830_cleanup(void) -{ - iounmap((void *) intel_i830_private.registers); -} - -static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type) -{ - int i,j,num_entries; - void *temp; - - temp = agp_bridge.current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - - if (pg_start < intel_i830_private.gtt_entries) { - printk (KERN_DEBUG "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n", - pg_start,intel_i830_private.gtt_entries); - - printk ("Trying to 
insert into local/stolen memory\n"); - return (-EINVAL); - } - - if ((pg_start + mem->page_count) > num_entries) - return (-EINVAL); - - /* The i830 can't check the GTT for entries since its read only, - * depend on the caller to make the correct offset decisions. - */ - - if ((type != 0 && type != AGP_PHYS_MEMORY) || - (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) - return (-EINVAL); - - CACHE_FLUSH(); - - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]); - - CACHE_FLUSH(); - - agp_bridge.tlb_flush(mem); - - return(0); -} - -static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type) -{ - int i; - - CACHE_FLUSH (); - - if (pg_start < intel_i830_private.gtt_entries) { - printk ("Trying to disable local/stolen memory\n"); - return (-EINVAL); - } - - for (i = pg_start; i < (mem->page_count + pg_start); i++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); - - CACHE_FLUSH(); - - agp_bridge.tlb_flush(mem); - - return (0); -} - -static agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type) -{ - agp_memory *nw; - - /* always return NULL for now */ - if (type == AGP_DCACHE_MEMORY) return(NULL); - - if (type == AGP_PHYS_MEMORY) { - void *addr; - - /* The i830 requires a physical address to program - * it's mouse pointer into hardware. 
However the - * Xserver still writes to it through the agp - * aperture - */ - - if (pg_count != 1) return(NULL); - - nw = agp_create_memory(1); - - if (nw == NULL) return(NULL); - - MOD_INC_USE_COUNT; - addr = agp_bridge.agp_alloc_page(); - if (addr == NULL) { - /* free this structure */ - agp_free_memory(nw); - return(NULL); - } - - nw->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr),type); - nw->page_count = 1; - nw->num_scratch_pages = 1; - nw->type = AGP_PHYS_MEMORY; - nw->physical = virt_to_phys(addr); - return(nw); - } - - return(NULL); -} - -static int __init intel_i830_setup(struct pci_dev *i830_dev) -{ - intel_i830_private.i830_dev = i830_dev; - - agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 3; - agp_bridge.aperture_sizes = (void *) intel_i830_sizes; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.num_aperture_sizes = 2; - - agp_bridge.dev_private_data = (void *) &intel_i830_private; - agp_bridge.needs_scratch_page = TRUE; - - agp_bridge.configure = intel_i830_configure; - agp_bridge.fetch_size = intel_i830_fetch_size; - agp_bridge.cleanup = intel_i830_cleanup; - agp_bridge.tlb_flush = intel_i810_tlbflush; - agp_bridge.mask_memory = intel_i810_mask_memory; - agp_bridge.agp_enable = intel_i810_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - - agp_bridge.create_gatt_table = intel_i830_create_gatt_table; - agp_bridge.free_gatt_table = intel_i830_free_gatt_table; - - agp_bridge.insert_memory = intel_i830_insert_entries; - agp_bridge.remove_memory = intel_i830_remove_entries; - agp_bridge.alloc_by_type = intel_i830_alloc_by_type; - agp_bridge.free_by_type = intel_i810_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return(0); -} - -#endif /* CONFIG_AGP_I810 */ - -#ifdef CONFIG_AGP_I460 - -/* BIOS configures the 
chipset so that one of two apbase registers are used */ -static u8 intel_i460_dynamic_apbase = 0x10; - -/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ -static u8 intel_i460_pageshift = 12; -static u32 intel_i460_pagesize; - -/* Keep track of which is larger, chipset or kernel page size. */ -static u32 intel_i460_cpk = 1; - -/* Structure for tracking partial use of 4MB GART pages */ -static u32 **i460_pg_detail = NULL; -static u32 *i460_pg_count = NULL; - -#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) -#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) - -#define I460_SRAM_IO_DISABLE (1 << 4) -#define I460_BAPBASE_ENABLE (1 << 3) -#define I460_AGPSIZ_MASK 0x7 -#define I460_4M_PS (1 << 1) - -#define log2(x) ffz(~(x)) - -static inline void intel_i460_read_back (volatile u32 *entry) -{ - /* - * The 460 spec says we have to read the last location written to - * make sure that all writes have taken effect - */ - *entry; -} - -static int intel_i460_fetch_size(void) -{ - int i; - u8 temp; - aper_size_info_8 *values; - - /* Determine the GART page size */ - pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); - intel_i460_pageshift = (temp & I460_4M_PS) ? 
22 : 12; - intel_i460_pagesize = 1UL << intel_i460_pageshift; - - values = A_SIZE_8(agp_bridge.aperture_sizes); - - pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); - - /* Exit now if the IO drivers for the GART SRAMS are turned off */ - if (temp & I460_SRAM_IO_DISABLE) { - printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); - printk(KERN_ERR PFX "AGPGART operation not possible\n"); - return 0; - } - - /* Make sure we don't try to create an 2 ^ 23 entry GATT */ - if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { - printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); - return 0; - } - - /* Determine the proper APBASE register */ - if (temp & I460_BAPBASE_ENABLE) - intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; - else - intel_i460_dynamic_apbase = INTEL_I460_APBASE; - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - /* - * Dynamically calculate the proper num_entries and page_order values for - * the define aperture sizes. Take care not to shift off the end of - * values[i].size. - */ - values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12); - values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); - } - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - /* Neglect control bits when matching up size_value */ - if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { - agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -/* There isn't anything to do here since 460 has no GART TLB. 
*/ -static void intel_i460_tlb_flush(agp_memory * mem) -{ - return; -} - -/* - * This utility function is needed to prevent corruption of the control bits - * which are stored along with the aperture size in 460's AGPSIZ register - */ -static void intel_i460_write_agpsiz(u8 size_value) -{ - u8 temp; - - pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); - pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, - ((temp & ~I460_AGPSIZ_MASK) | size_value)); -} - -static void intel_i460_cleanup(void) -{ - aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge.previous_size); - intel_i460_write_agpsiz(previous_size->size_value); - - if (intel_i460_cpk == 0) { - vfree(i460_pg_detail); - vfree(i460_pg_count); - } -} - - -/* Control bits for Out-Of-GART coherency and Burst Write Combining */ -#define I460_GXBCTL_OOG (1UL << 0) -#define I460_GXBCTL_BWC (1UL << 2) - -static int intel_i460_configure(void) -{ - union { - u32 small[2]; - u64 large; - } temp; - u8 scratch; - int i; - - aper_size_info_8 *current_size; - - temp.large = 0; - - current_size = A_SIZE_8(agp_bridge.current_size); - intel_i460_write_agpsiz(current_size->size_value); - - /* - * Do the necessary rigmarole to read all eight bytes of APBASE. - * This has to be done since the AGP aperture can be above 4GB on - * 460 based systems. - */ - pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0])); - pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1])); - - /* Clear BAR control bits */ - agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); - - pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); - pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, - (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); - - /* - * Initialize partial allocation trackers if a GART page is bigger than - * a kernel page. 
- */ - if (I460_CPAGES_PER_KPAGE >= 1) { - intel_i460_cpk = 1; - } else { - intel_i460_cpk = 0; - - i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries); - i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries); - - for (i = 0; i < current_size->num_entries; i++) { - i460_pg_count[i] = 0; - i460_pg_detail[i] = NULL; - } - } - return 0; -} - -static int intel_i460_create_gatt_table(void) -{ - char *table; - int i; - int page_order; - int num_entries; - void *temp; - - /* - * Load up the fixed address of the GART SRAMS which hold our - * GATT table. - */ - table = (char *) __va(INTEL_I460_ATTBASE); - - temp = agp_bridge.current_size; - page_order = A_SIZE_8(temp)->page_order; - num_entries = A_SIZE_8(temp)->num_entries; - - agp_bridge.gatt_table_real = (u32 *) table; - agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), - (PAGE_SIZE * (1 << page_order))); - agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); - - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = 0; - } - - intel_i460_read_back(agp_bridge.gatt_table + i - 1); - return 0; -} - -static int intel_i460_free_gatt_table(void) -{ - int num_entries; - int i; - void *temp; - - temp = agp_bridge.current_size; - - num_entries = A_SIZE_8(temp)->num_entries; - - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = 0; - } - - intel_i460_read_back(agp_bridge.gatt_table + i - 1); - - iounmap(agp_bridge.gatt_table); - return 0; -} - -/* These functions are called when PAGE_SIZE exceeds the GART page size */ - -static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type) -{ - int i, j, k, num_entries; - void *temp; - unsigned long paddr; - - /* - * The rest of the kernel will compute page offsets in terms of - * PAGE_SIZE. 
- */ - pg_start = I460_CPAGES_PER_KPAGE * pg_start; - - temp = agp_bridge.current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { - printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); - return -EINVAL; - } - - j = pg_start; - while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { - return -EBUSY; - } - j++; - } - -#if 0 - /* not necessary since 460 GART is operated in coherent mode... */ - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } -#endif - - for (i = 0, j = pg_start; i < mem->page_count; i++) { - paddr = mem->memory[i]; - for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) - agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type); - } - - intel_i460_read_back(agp_bridge.gatt_table + j - 1); - return 0; -} - -static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type) -{ - int i; - - pg_start = I460_CPAGES_PER_KPAGE * pg_start; - - for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++) - agp_bridge.gatt_table[i] = 0; - - intel_i460_read_back(agp_bridge.gatt_table + i - 1); - return 0; -} - -/* - * These functions are called when the GART page size exceeds PAGE_SIZE. - * - * This situation is interesting since AGP memory allocations that are - * smaller than a single GART page are possible. The structures i460_pg_count - * and i460_pg_detail track partial allocation of the large GART pages to - * work around this issue. - * - * i460_pg_count[pg_num] tracks the number of kernel pages in use within - * GART page pg_num. i460_pg_detail[pg_num] is an array containing a - * psuedo-GART entry for each of the aforementioned kernel pages. The whole - * of i460_pg_detail is equivalent to a giant GATT with page size equal to - * that of the kernel. 
- */ - -static void *intel_i460_alloc_large_page(int pg_num) -{ - int i; - void *bp, *bp_end; - struct page *page; - - i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE); - if (i460_pg_detail[pg_num] == NULL) { - printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); - return NULL; - } - - for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) - i460_pg_detail[pg_num][i] = 0; - - bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT); - if (bp == NULL) { - printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); - return NULL; - } - - bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); - - for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { - atomic_inc(&agp_bridge.current_memory_agp); - } - return bp; -} - -static void intel_i460_free_large_page(int pg_num, unsigned long addr) -{ - struct page *page; - void *bp, *bp_end; - - bp = (void *) __va(addr); - bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))); - - vfree(i460_pg_detail[pg_num]); - i460_pg_detail[pg_num] = NULL; - - for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { - atomic_dec(&agp_bridge.current_memory_agp); - } - - free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); -} - -static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type) -{ - int i, pg, start_pg, end_pg, start_offset, end_offset, idx; - int num_entries; - void *temp; - unsigned long paddr; - - temp = agp_bridge.current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - /* Figure out what pg_start means in terms of our large GART pages */ - start_pg = pg_start / I460_KPAGES_PER_CPAGE; - start_offset = pg_start % I460_KPAGES_PER_CPAGE; - end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; - - if (end_pg > num_entries) { - printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); - return 
-EINVAL; - } - - /* Check if the requested region of the aperture is free */ - for (pg = start_pg; pg <= end_pg; pg++) { - /* Allocate new GART pages if necessary */ - if (i460_pg_detail[pg] == NULL) { - temp = intel_i460_alloc_large_page(pg); - if (temp == NULL) - return -ENOMEM; - agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp, - 0); - intel_i460_read_back(agp_bridge.gatt_table + pg); - } - - for (idx = ((pg == start_pg) ? start_offset : 0); - idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); - idx++) - { - if (i460_pg_detail[pg][idx] != 0) - return -EBUSY; - } - } - -#if 0 - /* not necessary since 460 GART is operated in coherent mode... */ - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } -#endif - - for (pg = start_pg, i = 0; pg <= end_pg; pg++) { - paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); - for (idx = ((pg == start_pg) ? start_offset : 0); - idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); - idx++, i++) - { - mem->memory[i] = paddr + (idx * PAGE_SIZE); - i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i], - mem->type); - i460_pg_count[pg]++; - } - } - - return 0; -} - -static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type) -{ - int i, pg, start_pg, end_pg, start_offset, end_offset, idx; - int num_entries; - void *temp; - unsigned long paddr; - - temp = agp_bridge.current_size; - num_entries = A_SIZE_8(temp)->num_entries; - - /* Figure out what pg_start means in terms of our large GART pages */ - start_pg = pg_start / I460_KPAGES_PER_CPAGE; - start_offset = pg_start % I460_KPAGES_PER_CPAGE; - end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; - - for (i = 0, pg = start_pg; pg <= end_pg; pg++) { - for (idx = ((pg == start_pg) ? start_offset : 0); - idx < ((pg == end_pg) ? 
(end_offset + 1) : I460_KPAGES_PER_CPAGE); - idx++, i++) - { - mem->memory[i] = 0; - i460_pg_detail[pg][idx] = 0; - i460_pg_count[pg]--; - } - - /* Free GART pages if they are unused */ - if (i460_pg_count[pg] == 0) { - paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); - agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; - intel_i460_read_back(agp_bridge.gatt_table + pg); - intel_i460_free_large_page(pg, paddr); - } - } - return 0; -} - -/* Dummy routines to call the approriate {cpk,kpc} function */ - -static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type) -{ - if (intel_i460_cpk) - return intel_i460_insert_memory_cpk(mem, pg_start, type); - else - return intel_i460_insert_memory_kpc(mem, pg_start, type); -} - -static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type) -{ - if (intel_i460_cpk) - return intel_i460_remove_memory_cpk(mem, pg_start, type); - else - return intel_i460_remove_memory_kpc(mem, pg_start, type); -} - -/* - * If the kernel page size is smaller that the chipset page size, we don't - * want to allocate memory until we know where it is to be bound in the - * aperture (a multi-kernel-page alloc might fit inside of an already - * allocated GART page). Consequently, don't allocate or free anything - * if i460_cpk (meaning chipset pages per kernel page) isn't set. - * - * Let's just hope nobody counts on the allocated AGP memory being there - * before bind time (I don't think current drivers do)... - */ -static void * intel_i460_alloc_page(void) -{ - if (intel_i460_cpk) - return agp_generic_alloc_page(); - - /* Returning NULL would cause problems */ - /* AK: really dubious code. 
*/ - return (void *)~0UL; -} - -static void intel_i460_destroy_page(void *page) -{ - if (intel_i460_cpk) - agp_generic_destroy_page(page); -} - -static gatt_mask intel_i460_masks[] = -{ - { - INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, - 0 - } -}; - -static unsigned long intel_i460_mask_memory(unsigned long addr, int type) -{ - /* Make sure the returned address is a valid GATT entry */ - return (agp_bridge.masks[0].mask - | (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); -} - -static unsigned long intel_i460_unmask_memory(unsigned long addr) -{ - /* Turn a GATT entry into a physical address */ - return ((addr & 0xffffff) << 12); -} - -static aper_size_info_8 intel_i460_sizes[3] = -{ - /* - * The 32GB aperture is only available with a 4M GART page size. - * Due to the dynamic GART page size, we can't figure out page_order - * or num_entries until runtime. - */ - {32768, 0, 0, 4}, - {1024, 0, 0, 2}, - {256, 0, 0, 1} -}; - -static int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) -{ - agp_bridge.masks = intel_i460_masks; - agp_bridge.aperture_sizes = (void *) intel_i460_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 3; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_i460_configure; - agp_bridge.fetch_size = intel_i460_fetch_size; - agp_bridge.cleanup = intel_i460_cleanup; - agp_bridge.tlb_flush = intel_i460_tlb_flush; - agp_bridge.mask_memory = intel_i460_mask_memory; - agp_bridge.unmask_memory = intel_i460_unmask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = intel_i460_create_gatt_table; - agp_bridge.free_gatt_table = intel_i460_free_gatt_table; - agp_bridge.insert_memory = intel_i460_insert_memory; - agp_bridge.remove_memory = intel_i460_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - 
agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = intel_i460_alloc_page; - agp_bridge.agp_destroy_page = intel_i460_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 1; - return 0; -} - -#endif /* CONFIG_AGP_I460 */ - -#ifdef CONFIG_AGP_INTEL - -static int intel_fetch_size(void) -{ - int i; - u16 temp; - aper_size_info_16 *values; - - pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); - values = A_SIZE_16(agp_bridge.aperture_sizes); - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - - -static int intel_8xx_fetch_size(void) -{ - int i; - u8 temp; - aper_size_info_8 *values; - - pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp); - values = A_SIZE_8(agp_bridge.aperture_sizes); - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static void intel_tlbflush(agp_memory * mem) -{ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); -} - - -static void intel_8xx_tlbflush(agp_memory * mem) -{ - u32 temp; - pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7)); - pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7)); -} - - -static void intel_cleanup(void) -{ - u16 temp; - aper_size_info_16 *previous_size; - - previous_size = A_SIZE_16(agp_bridge.previous_size); - 
pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, - previous_size->size_value); -} - - -static void intel_8xx_cleanup(void) -{ - u16 temp; - aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - previous_size->size_value); -} - - -static int intel_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_16 *current_size; - - current_size = A_SIZE_16(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); - - /* paccfg/nbxcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, - (temp2 & ~(1 << 10)) | (1 << 9)); - /* clear any possible error conditions */ - pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); - return 0; -} - -static void intel_820_tlbflush(agp_memory * mem) -{ - return; -} - -static void intel_820_cleanup(void) -{ - u8 temp; - aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, - temp & ~(1 << 1)); - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - previous_size->size_value); -} - - -static int intel_820_configure(void) -{ - u32 
temp; - u8 temp2; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* global enable aperture access */ - /* This flag is not accessed through MCHCFG register as in */ - /* i850 chipset. */ - pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, - temp2 | (1 << 1)); - /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); - return 0; -} - -static int intel_840_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, - temp2 | (1 << 9)); - /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); - return 0; -} - -static int intel_845_configure(void) -{ - u32 temp; - u8 temp2; - aper_size_info_8 *current_size; - - 
current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* agpm */ - pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2); - pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM, - temp2 | (1 << 1)); - /* clear any possible error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); - return 0; -} - -static int intel_850_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, - temp2 | (1 << 9)); - /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); - return 0; -} - -static int intel_860_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address 
to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* mcgcfg */ - pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, - temp2 | (1 << 9)); - /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700); - return 0; -} - -static int intel_830mp_configure(void) -{ - u32 temp; - u16 temp2; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, - current_size->size_value); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* attbase - aperture base */ - pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* agpctrl */ - pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); - - /* gmch */ - pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); - pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, - temp2 | (1 << 9)); - /* clear any possible AGP-related error conditions */ - pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c); - return 0; -} - -static unsigned long intel_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static void intel_resume(void) -{ - intel_configure(); -} - -/* Setup function */ -static gatt_mask intel_generic_masks[] = -{ - {0x00000017, 0} -}; - -static aper_size_info_8 intel_8xx_sizes[7] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 32}, - {64, 16384, 4, 48}, - {32, 8192, 3, 56}, 
- {16, 4096, 2, 60}, - {8, 2048, 1, 62}, - {4, 1024, 0, 63} -}; - -static aper_size_info_16 intel_generic_sizes[7] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 32}, - {64, 16384, 4, 48}, - {32, 8192, 3, 56}, - {16, 4096, 2, 60}, - {8, 2048, 1, 62}, - {4, 1024, 0, 63} -}; - -static aper_size_info_8 intel_830mp_sizes[4] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 32}, - {64, 16384, 4, 48}, - {32, 8192, 3, 56} -}; - -static int __init intel_generic_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_generic_sizes; - agp_bridge.size_type = U16_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_configure; - agp_bridge.fetch_size = intel_fetch_size; - agp_bridge.cleanup = intel_cleanup; - agp_bridge.tlb_flush = intel_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = intel_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - - -static int __init intel_820_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - 
agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_820_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_820_cleanup; - agp_bridge.tlb_flush = intel_820_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -static int __init intel_830mp_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 4; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_830mp_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - 
agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -static int __init intel_840_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_840_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -static int __init intel_845_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - 
agp_bridge.configure = intel_845_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -static int __init intel_850_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_850_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = 
agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -static int __init intel_860_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_860_configure; - agp_bridge.fetch_size = intel_8xx_fetch_size; - agp_bridge.cleanup = intel_8xx_cleanup; - agp_bridge.tlb_flush = intel_8xx_tlbflush; - agp_bridge.mask_memory = intel_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_INTEL */ - -#ifdef CONFIG_AGP_VIA - -static int via_fetch_size(void) -{ - int i; - u8 temp; - aper_size_info_8 *values; - - values = A_SIZE_8(agp_bridge.aperture_sizes); - pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - 
agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static int via_configure(void) -{ - u32 temp; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - /* aperture size */ - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, - current_size->size_value); - /* address to map too */ - pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* GART control register */ - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); - - /* attbase - aperture GATT base */ - pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, - (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); - return 0; -} - -static void via_cleanup(void) -{ - aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, - previous_size->size_value); - /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up - * during reinitialization. 
- */ -} - -static void via_tlbflush(agp_memory * mem) -{ - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); - pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); -} - -static unsigned long via_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static aper_size_info_8 via_generic_sizes[7] = -{ - {256, 65536, 6, 0}, - {128, 32768, 5, 128}, - {64, 16384, 4, 192}, - {32, 8192, 3, 224}, - {16, 4096, 2, 240}, - {8, 2048, 1, 248}, - {4, 1024, 0, 252} -}; - -static gatt_mask via_generic_masks[] = -{ - {0x00000000, 0} -}; - -static int __init via_generic_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = via_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) via_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = via_configure; - agp_bridge.fetch_size = via_fetch_size; - agp_bridge.cleanup = via_cleanup; - agp_bridge.tlb_flush = via_tlbflush; - agp_bridge.mask_memory = via_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_VIA */ - -#ifdef CONFIG_AGP_SIS - -static int sis_fetch_size(void) -{ - u8 
temp_size; - int i; - aper_size_info_8 *values; - - pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); - values = A_SIZE_8(agp_bridge.aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if ((temp_size == values[i].size_value) || - ((temp_size & ~(0x03)) == - (values[i].size_value & ~(0x03)))) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - - -static void sis_tlbflush(agp_memory * mem) -{ - pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); -} - -static int sis_configure(void) -{ - u32 temp; - aper_size_info_8 *current_size; - - current_size = A_SIZE_8(agp_bridge.current_size); - pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); - pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, - agp_bridge.gatt_bus_addr); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, - current_size->size_value); - return 0; -} - -static void sis_cleanup(void) -{ - aper_size_info_8 *previous_size; - - previous_size = A_SIZE_8(agp_bridge.previous_size); - pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, - (previous_size->size_value & ~(0x03))); -} - -static unsigned long sis_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static aper_size_info_8 sis_generic_sizes[7] = -{ - {256, 65536, 6, 99}, - {128, 32768, 5, 83}, - {64, 16384, 4, 67}, - {32, 8192, 3, 51}, - {16, 4096, 2, 35}, - {8, 2048, 1, 19}, - {4, 1024, 0, 3} -}; - -static gatt_mask sis_generic_masks[] = -{ - {0x00000000, 0} -}; - -static int __init sis_generic_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = sis_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) sis_generic_sizes; - agp_bridge.size_type = U8_APER_SIZE; - 
agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = sis_configure; - agp_bridge.fetch_size = sis_fetch_size; - agp_bridge.cleanup = sis_cleanup; - agp_bridge.tlb_flush = sis_tlbflush; - agp_bridge.mask_memory = sis_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; -} - -#endif /* CONFIG_AGP_SIS */ - -#ifdef CONFIG_AGP_AMD - -typedef struct _amd_page_map { - unsigned long *real; - unsigned long *remapped; -} amd_page_map; - -static struct _amd_irongate_private { - volatile u8 *registers; - amd_page_map **gatt_pages; - int num_tables; -} amd_irongate_private; - -static int amd_create_page_map(amd_page_map *page_map) -{ - int i; - - page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); - if (page_map->real == NULL) { - return -ENOMEM; - } - SetPageReserved(virt_to_page(page_map->real)); - CACHE_FLUSH(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); - if (page_map->remapped == NULL) { - ClearPageReserved(virt_to_page(page_map->real)); - free_page((unsigned long) page_map->real); - page_map->real = NULL; - return -ENOMEM; - } - CACHE_FLUSH(); - - for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { - page_map->remapped[i] = agp_bridge.scratch_page; - } - - return 0; -} - -static void 
amd_free_page_map(amd_page_map *page_map) -{ - iounmap(page_map->remapped); - ClearPageReserved(virt_to_page(page_map->real)); - free_page((unsigned long) page_map->real); -} - -static void amd_free_gatt_pages(void) -{ - int i; - amd_page_map **tables; - amd_page_map *entry; - - tables = amd_irongate_private.gatt_pages; - for(i = 0; i < amd_irongate_private.num_tables; i++) { - entry = tables[i]; - if (entry != NULL) { - if (entry->real != NULL) { - amd_free_page_map(entry); - } - kfree(entry); - } - } - kfree(tables); -} - -static int amd_create_gatt_pages(int nr_tables) -{ - amd_page_map **tables; - amd_page_map *entry; - int retval = 0; - int i; - - tables = kmalloc((nr_tables + 1) * sizeof(amd_page_map *), - GFP_KERNEL); - if (tables == NULL) { - return -ENOMEM; - } - memset(tables, 0, sizeof(amd_page_map *) * (nr_tables + 1)); - for (i = 0; i < nr_tables; i++) { - entry = kmalloc(sizeof(amd_page_map), GFP_KERNEL); - if (entry == NULL) { - retval = -ENOMEM; - break; - } - memset(entry, 0, sizeof(amd_page_map)); - tables[i] = entry; - retval = amd_create_page_map(entry); - if (retval != 0) break; - } - amd_irongate_private.num_tables = nr_tables; - amd_irongate_private.gatt_pages = tables; - - if (retval != 0) amd_free_gatt_pages(); - - return retval; -} - -/* Since we don't need contigious memory we just try - * to get the gatt table once - */ - -#define GET_PAGE_DIR_OFF(addr) (addr >> 22) -#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ - GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) -#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) -#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ - GET_PAGE_DIR_IDX(addr)]->remapped) - -static int amd_create_gatt_table(void) -{ - aper_size_info_lvl2 *value; - amd_page_map page_dir; - unsigned long addr; - int retval; - u32 temp; - int i; - - value = A_SIZE_LVL2(agp_bridge.current_size); - retval = amd_create_page_map(&page_dir); - if (retval != 0) { - return retval; - } - - retval = 
amd_create_gatt_pages(value->num_entries / 1024); - if (retval != 0) { - amd_free_page_map(&page_dir); - return retval; - } - - agp_bridge.gatt_table_real = page_dir.real; - agp_bridge.gatt_table = page_dir.remapped; - agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); - - /* Get the address for the gart region. - * This is a bus address even on the alpha, b/c its - * used to program the agp master not the cpu - */ - - pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); - addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - agp_bridge.gart_bus_addr = addr; - - /* Calculate the agp offset */ - for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { - page_dir.remapped[GET_PAGE_DIR_OFF(addr)] = - virt_to_phys(amd_irongate_private.gatt_pages[i]->real); - page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001; - } - - return 0; -} - -static int amd_free_gatt_table(void) -{ - amd_page_map page_dir; - - page_dir.real = agp_bridge.gatt_table_real; - page_dir.remapped = agp_bridge.gatt_table; - - amd_free_gatt_pages(); - amd_free_page_map(&page_dir); - return 0; -} - -static int amd_irongate_fetch_size(void) -{ - int i; - u32 temp; - aper_size_info_lvl2 *values; - - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = (temp & 0x0000000e); - values = A_SIZE_LVL2(agp_bridge.aperture_sizes); - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static int amd_irongate_configure(void) -{ - aper_size_info_lvl2 *current_size; - u32 temp; - u16 enable_reg; - - current_size = A_SIZE_LVL2(agp_bridge.current_size); - - /* Get the memory mapped registers */ - pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); - temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096); - - /* Write 
out the address of the gatt table */ - OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, - agp_bridge.gatt_bus_addr); - - /* Write the Sync register */ - pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); - - /* Set indexing mode */ - pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 0x00); - - /* Write the enable register */ - enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); - enable_reg = (enable_reg | 0x0004); - OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); - - /* Write out the size register */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = (((temp & ~(0x0000000e)) | current_size->size_value) - | 0x00000001); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); - - /* Flush the tlb */ - OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); - - return 0; -} - -static void amd_irongate_cleanup(void) -{ - aper_size_info_lvl2 *previous_size; - u32 temp; - u16 enable_reg; - - previous_size = A_SIZE_LVL2(agp_bridge.previous_size); - - enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); - enable_reg = (enable_reg & ~(0x0004)); - OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); - - /* Write back the previous size and disable gart translation */ - pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); - temp = ((temp & ~(0x0000000f)) | previous_size->size_value); - pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); - iounmap((void *) amd_irongate_private.registers); -} - -/* - * This routine could be implemented by taking the addresses - * written to the GATT, and flushing them individually. However - * currently it just flushes the whole table. Which is probably - * more efficent, since agp_memory blocks can be a large number of - * entries. 
- */ - -static void amd_irongate_tlbflush(agp_memory * temp) -{ - OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); -} - -static unsigned long amd_irongate_mask_memory(unsigned long addr, int type) -{ - /* Only type 0 is supported by the irongate */ - - return addr | agp_bridge.masks[0].mask; -} - -static int amd_insert_memory(agp_memory * mem, - off_t pg_start, int type) -{ - int i, j, num_entries; - unsigned long *cur_gatt; - unsigned long addr; - - num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - - j = pg_start; - while (j < (pg_start + mem->page_count)) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = GET_GATT(addr); - if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; - } - agp_bridge.tlb_flush(mem); - return 0; -} - -static int amd_remove_memory(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - unsigned long *cur_gatt; - unsigned long addr; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = - (unsigned long) agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static aper_size_info_lvl2 amd_irongate_sizes[7] = -{ - {2048, 524288, 0x0000000c}, - {1024, 262144, 0x0000000a}, - {512, 131072, 0x00000008}, - {256, 65536, 0x00000006}, - {128, 32768, 0x00000004}, - {64, 16384, 0x00000002}, - {32, 8192, 0x00000000} -}; - -static gatt_mask amd_irongate_masks[] = -{ 
- {0x00000001, 0} -}; - -static int __init amd_irongate_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = amd_irongate_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; - agp_bridge.size_type = LVL2_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = (void *) &amd_irongate_private; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = amd_irongate_configure; - agp_bridge.fetch_size = amd_irongate_fetch_size; - agp_bridge.cleanup = amd_irongate_cleanup; - agp_bridge.tlb_flush = amd_irongate_tlbflush; - agp_bridge.mask_memory = amd_irongate_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = amd_create_gatt_table; - agp_bridge.free_gatt_table = amd_free_gatt_table; - agp_bridge.insert_memory = amd_insert_memory; - agp_bridge.remove_memory = amd_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_AMD */ - -#ifdef CONFIG_AGP_ALI - -static int ali_fetch_size(void) -{ - int i; - u32 temp; - aper_size_info_32 *values; - - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - temp &= ~(0xfffffff0); - values = A_SIZE_32(agp_bridge.aperture_sizes); - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static void ali_tlbflush(agp_memory * mem) -{ - u32 temp; - - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, 
&temp); -// clear tag - pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, - ((temp & 0xfffffff0) | 0x00000001|0x00000002)); -} - -static void ali_cleanup(void) -{ - aper_size_info_32 *previous_size; - u32 temp; - - previous_size = A_SIZE_32(agp_bridge.previous_size); - - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); -// clear tag - pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, - ((temp & 0xffffff00) | 0x00000001|0x00000002)); - - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, - ((temp & 0x00000ff0) | previous_size->size_value)); -} - -static int ali_configure(void) -{ - u32 temp; - aper_size_info_32 *current_size; - - current_size = A_SIZE_32(agp_bridge.current_size); - - /* aperture size and gatt addr */ - pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); - temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000)) - | (current_size->size_value & 0xf)); - pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp); - - /* tlb control */ - - /* - * Question: Jeff, ALi's patch deletes this: - * - * pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - * pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, - * ((temp & 0xffffff00) | 0x00000010)); - * - * and replaces it with the following, which seems to duplicate the - * next couple of lines below it. I suspect this was an oversight, - * but you might want to check up on this? 
- */ - - pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* address to map to */ - pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - -#if 0 - if (agp_bridge.type == ALI_M1541) { - u32 nlvm_addr = 0; - - switch (current_size->size_value) { - case 0: break; - case 1: nlvm_addr = 0x100000;break; - case 2: nlvm_addr = 0x200000;break; - case 3: nlvm_addr = 0x400000;break; - case 4: nlvm_addr = 0x800000;break; - case 6: nlvm_addr = 0x1000000;break; - case 7: nlvm_addr = 0x2000000;break; - case 8: nlvm_addr = 0x4000000;break; - case 9: nlvm_addr = 0x8000000;break; - case 10: nlvm_addr = 0x10000000;break; - default: break; - } - nlvm_addr--; - nlvm_addr&=0xfff00000; - - nlvm_addr+= agp_bridge.gart_bus_addr; - nlvm_addr|=(agp_bridge.gart_bus_addr>>12); - printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); - } -#endif - - pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); - temp &= 0xffffff7f; //enable TLB - pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp); - - return 0; -} - -static unsigned long ali_mask_memory(unsigned long addr, int type) -{ - /* Memory type is ignored */ - - return addr | agp_bridge.masks[0].mask; -} - -static void ali_cache_flush(void) -{ - global_cache_flush(); - - if (agp_bridge.type == ALI_M1541) { - int i, page_count; - u32 temp; - - page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order; - for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { - pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, - (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - (agp_bridge.gatt_bus_addr + i)) | - ALI_CACHE_FLUSH_EN)); - } - } -} - -static void *ali_alloc_page(void) -{ - void *adr = agp_generic_alloc_page(); - unsigned temp; - - if (adr == 0) - return 0; - - if (agp_bridge.type == ALI_M1541) { - 
pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, - (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_phys(adr)) | - ALI_CACHE_FLUSH_EN )); - } - return adr; -} - -static void ali_destroy_page(void * addr) -{ - u32 temp; - - if (addr == NULL) - return; - - global_cache_flush(); - - if (agp_bridge.type == ALI_M1541) { - pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); - pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, - (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | - virt_to_phys(addr)) | - ALI_CACHE_FLUSH_EN)); - } - - agp_generic_destroy_page(addr); -} - -/* Setup function */ -static gatt_mask ali_generic_masks[] = -{ - {0x00000000, 0} -}; - -static aper_size_info_32 ali_generic_sizes[7] = -{ - {256, 65536, 6, 10}, - {128, 32768, 5, 9}, - {64, 16384, 4, 8}, - {32, 8192, 3, 7}, - {16, 4096, 2, 6}, - {8, 2048, 1, 4}, - {4, 1024, 0, 3} -}; - -static int __init ali_generic_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = ali_generic_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) ali_generic_sizes; - agp_bridge.size_type = U32_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = NULL; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = ali_configure; - agp_bridge.fetch_size = ali_fetch_size; - agp_bridge.cleanup = ali_cleanup; - agp_bridge.tlb_flush = ali_tlbflush; - agp_bridge.mask_memory = ali_mask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = ali_cache_flush; - agp_bridge.create_gatt_table = agp_generic_create_gatt_table; - agp_bridge.free_gatt_table = agp_generic_free_gatt_table; - agp_bridge.insert_memory = agp_generic_insert_memory; - agp_bridge.remove_memory = agp_generic_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = ali_alloc_page; - 
agp_bridge.agp_destroy_page = ali_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - return 0; - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_ALI */ - -#ifdef CONFIG_AGP_SWORKS -typedef struct _serverworks_page_map { - unsigned long *real; - unsigned long *remapped; -} serverworks_page_map; - -static struct _serverworks_private { - struct pci_dev *svrwrks_dev; /* device one */ - volatile u8 *registers; - serverworks_page_map **gatt_pages; - int num_tables; - serverworks_page_map scratch_dir; - - int gart_addr_ofs; - int mm_addr_ofs; -} serverworks_private; - -static int serverworks_create_page_map(serverworks_page_map *page_map) -{ - int i; - - page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); - if (page_map->real == NULL) { - return -ENOMEM; - } - SetPageReserved(virt_to_page(page_map->real)); - CACHE_FLUSH(); - page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), - PAGE_SIZE); - if (page_map->remapped == NULL) { - ClearPageReserved(virt_to_page(page_map->real)); - free_page((unsigned long) page_map->real); - page_map->real = NULL; - return -ENOMEM; - } - CACHE_FLUSH(); - - for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { - page_map->remapped[i] = agp_bridge.scratch_page; - } - - return 0; -} - -static void serverworks_free_page_map(serverworks_page_map *page_map) -{ - iounmap(page_map->remapped); - ClearPageReserved(virt_to_page(page_map->real)); - free_page((unsigned long) page_map->real); -} - -static void serverworks_free_gatt_pages(void) -{ - int i; - serverworks_page_map **tables; - serverworks_page_map *entry; - - tables = serverworks_private.gatt_pages; - for(i = 0; i < serverworks_private.num_tables; i++) { - entry = tables[i]; - if (entry != NULL) { - if (entry->real != NULL) { - serverworks_free_page_map(entry); - } - kfree(entry); - } - } - kfree(tables); -} - -static int serverworks_create_gatt_pages(int nr_tables) -{ 
- serverworks_page_map **tables; - serverworks_page_map *entry; - int retval = 0; - int i; - - tables = kmalloc((nr_tables + 1) * sizeof(serverworks_page_map *), - GFP_KERNEL); - if (tables == NULL) { - return -ENOMEM; - } - memset(tables, 0, sizeof(serverworks_page_map *) * (nr_tables + 1)); - for (i = 0; i < nr_tables; i++) { - entry = kmalloc(sizeof(serverworks_page_map), GFP_KERNEL); - if (entry == NULL) { - retval = -ENOMEM; - break; - } - memset(entry, 0, sizeof(serverworks_page_map)); - tables[i] = entry; - retval = serverworks_create_page_map(entry); - if (retval != 0) break; - } - serverworks_private.num_tables = nr_tables; - serverworks_private.gatt_pages = tables; - - if (retval != 0) serverworks_free_gatt_pages(); - - return retval; -} - -#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ - GET_PAGE_DIR_IDX(addr)]->remapped) - -#ifndef GET_PAGE_DIR_OFF -#define GET_PAGE_DIR_OFF(addr) (addr >> 22) -#endif - -#ifndef GET_PAGE_DIR_IDX -#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ - GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) -#endif - -#ifndef GET_GATT_OFF -#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) -#endif - -static int serverworks_create_gatt_table(void) -{ - aper_size_info_lvl2 *value; - serverworks_page_map page_dir; - int retval; - u32 temp; - int i; - - value = A_SIZE_LVL2(agp_bridge.current_size); - retval = serverworks_create_page_map(&page_dir); - if (retval != 0) { - return retval; - } - retval = serverworks_create_page_map(&serverworks_private.scratch_dir); - if (retval != 0) { - serverworks_free_page_map(&page_dir); - return retval; - } - /* Create a fake scratch directory */ - for(i = 0; i < 1024; i++) { - serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page; - page_dir.remapped[i] = - virt_to_phys(serverworks_private.scratch_dir.real); - page_dir.remapped[i] |= 0x00000001; - } - - retval = serverworks_create_gatt_pages(value->num_entries / 1024); - if (retval != 0) { - 
serverworks_free_page_map(&page_dir); - serverworks_free_page_map(&serverworks_private.scratch_dir); - return retval; - } - - agp_bridge.gatt_table_real = page_dir.real; - agp_bridge.gatt_table = page_dir.remapped; - agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); - - /* Get the address for the gart region. - * This is a bus address even on the alpha, b/c its - * used to program the agp master not the cpu - */ - - pci_read_config_dword(agp_bridge.dev, - serverworks_private.gart_addr_ofs, - &temp); - agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - - /* Calculate the agp offset */ - - for(i = 0; i < value->num_entries / 1024; i++) { - page_dir.remapped[i] = - virt_to_phys(serverworks_private.gatt_pages[i]->real); - page_dir.remapped[i] |= 0x00000001; - } - - return 0; -} - -static int serverworks_free_gatt_table(void) -{ - serverworks_page_map page_dir; - - page_dir.real = agp_bridge.gatt_table_real; - page_dir.remapped = agp_bridge.gatt_table; - - serverworks_free_gatt_pages(); - serverworks_free_page_map(&page_dir); - serverworks_free_page_map(&serverworks_private.scratch_dir); - return 0; -} - -static int serverworks_fetch_size(void) -{ - int i; - u32 temp; - u32 temp2; - aper_size_info_lvl2 *values; - - values = A_SIZE_LVL2(agp_bridge.aperture_sizes); - pci_read_config_dword(agp_bridge.dev, - serverworks_private.gart_addr_ofs, - &temp); - pci_write_config_dword(agp_bridge.dev, - serverworks_private.gart_addr_ofs, - SVWRKS_SIZE_MASK); - pci_read_config_dword(agp_bridge.dev, - serverworks_private.gart_addr_ofs, - &temp2); - pci_write_config_dword(agp_bridge.dev, - serverworks_private.gart_addr_ofs, - temp); - temp2 &= SVWRKS_SIZE_MASK; - - for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { - if (temp2 == values[i].size_value) { - agp_bridge.previous_size = - agp_bridge.current_size = (void *) (values + i); - - agp_bridge.aperture_size_idx = i; - return values[i].size; - } - } - - return 0; -} - -static int serverworks_configure(void) -{ 
- aper_size_info_lvl2 *current_size; - u32 temp; - u8 enable_reg; - u8 cap_ptr; - u32 cap_id; - u16 cap_reg; - - current_size = A_SIZE_LVL2(agp_bridge.current_size); - - /* Get the memory mapped registers */ - pci_read_config_dword(agp_bridge.dev, - serverworks_private.mm_addr_ofs, - &temp); - temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096); - - OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a); - - OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, - agp_bridge.gatt_bus_addr); - - cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND); - cap_reg &= ~0x0007; - cap_reg |= 0x4; - OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg); - - pci_read_config_byte(serverworks_private.svrwrks_dev, - SVWRKS_AGP_ENABLE, &enable_reg); - enable_reg |= 0x1; /* Agp Enable bit */ - pci_write_config_byte(serverworks_private.svrwrks_dev, - SVWRKS_AGP_ENABLE, enable_reg); - agp_bridge.tlb_flush(NULL); - - pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr); - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(serverworks_private.svrwrks_dev, - cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - agp_bridge.capndx = cap_ptr; - - /* Fill in the mode register */ - pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 4, - &agp_bridge.mode); - - pci_read_config_byte(agp_bridge.dev, - SVWRKS_CACHING, - &enable_reg); - enable_reg &= ~0x3; - pci_write_config_byte(agp_bridge.dev, - SVWRKS_CACHING, - enable_reg); - - pci_read_config_byte(agp_bridge.dev, - SVWRKS_FEATURE, - &enable_reg); - enable_reg |= (1<<6); - pci_write_config_byte(agp_bridge.dev, - SVWRKS_FEATURE, - enable_reg); - - return 0; -} - -static void serverworks_cleanup(void) -{ - iounmap((void *) serverworks_private.registers); -} - -/* - * This routine could be implemented by 
taking the addresses - * written to the GATT, and flushing them individually. However - * currently it just flushes the whole table. Which is probably - * more efficent, since agp_memory blocks can be a large number of - * entries. - */ - -static void serverworks_tlbflush(agp_memory * temp) -{ - unsigned long end; - - OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01); - end = jiffies + 3*HZ; - while(INREG8(serverworks_private.registers, - SVWRKS_POSTFLUSH) == 0x01) { - if((signed)(end - jiffies) <= 0) { - printk(KERN_ERR "Posted write buffer flush took more" - "then 3 seconds\n"); - } - } - OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001); - end = jiffies + 3*HZ; - while(INREG32(serverworks_private.registers, - SVWRKS_DIRFLUSH) == 0x00000001) { - if((signed)(end - jiffies) <= 0) { - printk(KERN_ERR "TLB flush took more" - "then 3 seconds\n"); - } - } -} - -static unsigned long serverworks_mask_memory(unsigned long addr, int type) -{ - /* Only type 0 is supported by the serverworks chipsets */ - - return addr | agp_bridge.masks[0].mask; -} - -static int serverworks_insert_memory(agp_memory * mem, - off_t pg_start, int type) -{ - int i, j, num_entries; - unsigned long *cur_gatt; - unsigned long addr; - - num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - if ((pg_start + mem->page_count) > num_entries) { - return -EINVAL; - } - - j = pg_start; - while (j < (pg_start + mem->page_count)) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = SVRWRKS_GET_GATT(addr); - if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; - } - 
agp_bridge.tlb_flush(mem); - return 0; -} - -static int serverworks_remove_memory(agp_memory * mem, off_t pg_start, - int type) -{ - int i; - unsigned long *cur_gatt; - unsigned long addr; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - - CACHE_FLUSH(); - agp_bridge.tlb_flush(mem); - - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; - cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = - (unsigned long) agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static gatt_mask serverworks_masks[] = -{ - {0x00000001, 0} -}; - -static aper_size_info_lvl2 serverworks_sizes[7] = -{ - {2048, 524288, 0x80000000}, - {1024, 262144, 0xc0000000}, - {512, 131072, 0xe0000000}, - {256, 65536, 0xf0000000}, - {128, 32768, 0xf8000000}, - {64, 16384, 0xfc000000}, - {32, 8192, 0xfe000000} -}; - -static void serverworks_agp_enable(u32 mode) -{ - struct pci_dev *device = NULL; - u32 command, scratch, cap_id; - u8 cap_ptr; - - pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 4, - &command); - - /* - * PASS1: go throu all devices that claim to be - * AGP devices and collect their data. - */ - - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(device, - cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. Disable impossible - * settings, and adjust the readqueue to the minimum. 
- */ - - pci_read_config_dword(device, cap_ptr + 4, &scratch); - - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min_t(u32, (mode & 0xff000000), - min_t(u32, (command & 0xff000000), - (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && - (scratch & 0x00000200) && - (mode & 0x00000200))) - command &= ~0x00000200; - - /* disable FW */ - command &= ~0x00000010; - - command &= ~0x00000008; - - if (!((command & 4) && - (scratch & 4) && - (mode & 4))) - command &= ~0x00000004; - - if (!((command & 2) && - (scratch & 2) && - (mode & 2))) - command &= ~0x00000002; - - if (!((command & 1) && - (scratch & 1) && - (mode & 1))) - command &= ~0x00000001; - } - } - /* - * PASS2: Figure out the 4X/2X/1X setting and enable the - * target (our motherboard chipset). - */ - - if (command & 4) { - command &= ~3; /* 4X */ - } - if (command & 2) { - command &= ~5; /* 2X */ - } - if (command & 1) { - command &= ~6; /* 1X */ - } - command |= 0x00000100; - - pci_write_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 8, - command); - - /* - * PASS3: Go throu all AGP devices and update the - * command registers. 
- */ - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) - pci_write_config_dword(device, cap_ptr + 8, command); - } -} - -static int __init serverworks_setup (struct pci_dev *pdev) -{ - u32 temp; - u32 temp2; - - serverworks_private.svrwrks_dev = pdev; - - agp_bridge.masks = serverworks_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.aperture_sizes = (void *) serverworks_sizes; - agp_bridge.size_type = LVL2_APER_SIZE; - agp_bridge.num_aperture_sizes = 7; - agp_bridge.dev_private_data = (void *) &serverworks_private; - agp_bridge.needs_scratch_page = TRUE; - agp_bridge.configure = serverworks_configure; - agp_bridge.fetch_size = serverworks_fetch_size; - agp_bridge.cleanup = serverworks_cleanup; - agp_bridge.tlb_flush = serverworks_tlbflush; - agp_bridge.mask_memory = serverworks_mask_memory; - agp_bridge.agp_enable = serverworks_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = serverworks_create_gatt_table; - agp_bridge.free_gatt_table = serverworks_free_gatt_table; - agp_bridge.insert_memory = serverworks_insert_memory; - agp_bridge.remove_memory = serverworks_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.suspend = agp_generic_suspend; - agp_bridge.resume = agp_generic_resume; - agp_bridge.cant_use_aperture = 0; - - pci_read_config_dword(agp_bridge.dev, - SVWRKS_APSIZE, - &temp); - - serverworks_private.gart_addr_ofs = 0x10; - - if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_read_config_dword(agp_bridge.dev, - SVWRKS_APSIZE + 4, - &temp2); - if(temp2 != 0) { - printk("Detected 64 bit aperture address, but top " - "bits are not zero. 
Disabling agp\n"); - return -ENODEV; - } - serverworks_private.mm_addr_ofs = 0x18; - } else { - serverworks_private.mm_addr_ofs = 0x14; - } - - pci_read_config_dword(agp_bridge.dev, - serverworks_private.mm_addr_ofs, - &temp); - if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_read_config_dword(agp_bridge.dev, - serverworks_private.mm_addr_ofs + 4, - &temp2); - if(temp2 != 0) { - printk("Detected 64 bit MMIO address, but top " - "bits are not zero. Disabling agp\n"); - return -ENODEV; - } - } - - return 0; -} - -#endif /* CONFIG_AGP_SWORKS */ - -#ifdef CONFIG_AGP_HP_ZX1 - -#ifndef log2 -#define log2(x) ffz(~(x)) -#endif - -#define HP_ZX1_IOVA_BASE GB(1UL) -#define HP_ZX1_IOVA_SIZE GB(1UL) -#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) -#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL - -#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL -#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \ - hp_private.io_tlb_shift) - -static aper_size_info_fixed hp_zx1_sizes[] = -{ - {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ -}; - -static gatt_mask hp_zx1_masks[] = -{ - {HP_ZX1_PDIR_VALID_BIT, 0} -}; - -static struct _hp_private { - struct pci_dev *ioc; - volatile u8 *registers; - u64 *io_pdir; // PDIR for entire IOVA - u64 *gatt; // PDIR just for GART (subset of above) - u64 gatt_entries; - u64 iova_base; - u64 gart_base; - u64 gart_size; - u64 io_pdir_size; - int io_pdir_owner; // do we own it, or share it with sba_iommu? - int io_page_size; - int io_tlb_shift; - int io_tlb_ps; // IOC ps config - int io_pages_per_kpage; -} hp_private; - -static int __init hp_zx1_ioc_shared(void) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); - - /* - * IOC already configured by sba_iommu module; just use - * its setup. 
We assume: - * - IOVA space is 1Gb in size - * - first 512Mb is IOMMU, second 512Mb is GART - */ - hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG); - switch (hp->io_tlb_ps) { - case 0: hp->io_tlb_shift = 12; break; - case 1: hp->io_tlb_shift = 13; break; - case 2: hp->io_tlb_shift = 14; break; - case 3: hp->io_tlb_shift = 16; break; - default: - printk(KERN_ERR PFX "Invalid IOTLB page size " - "configuration 0x%x\n", hp->io_tlb_ps); - hp->gatt = 0; - hp->gatt_entries = 0; - return -ENODEV; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; - - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gatt_entries = hp->gart_size / hp->io_page_size; - - hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE)); - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - - if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { - hp->gatt = 0; - hp->gatt_entries = 0; - printk(KERN_ERR PFX "No reserved IO PDIR entry found; " - "GART disabled\n"); - return -ENODEV; - } - - return 0; -} - -static int __init hp_zx1_ioc_owner(u8 ioc_rev) -{ - struct _hp_private *hp = &hp_private; - - printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); - - /* - * Select an IOV page size no larger than system page size. 
- */ - if (PAGE_SIZE >= KB(64)) { - hp->io_tlb_shift = 16; - hp->io_tlb_ps = 3; - } else if (PAGE_SIZE >= KB(16)) { - hp->io_tlb_shift = 14; - hp->io_tlb_ps = 2; - } else if (PAGE_SIZE >= KB(8)) { - hp->io_tlb_shift = 13; - hp->io_tlb_ps = 1; - } else { - hp->io_tlb_shift = 12; - hp->io_tlb_ps = 0; - } - hp->io_page_size = 1 << hp->io_tlb_shift; - hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - - hp->iova_base = HP_ZX1_IOVA_BASE; - hp->gart_size = HP_ZX1_GART_SIZE; - hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; - - hp->gatt_entries = hp->gart_size / hp->io_page_size; - hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); - - return 0; -} - -static int __init hp_zx1_ioc_init(void) -{ - struct _hp_private *hp = &hp_private; - struct pci_dev *ioc; - int i; - u8 ioc_rev; - - ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); - if (!ioc) { - printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n"); - return -ENODEV; - } - hp->ioc = ioc; - - pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { - hp->registers = (u8 *) ioremap(pci_resource_start(ioc, - i), - pci_resource_len(ioc, i)); - break; - } - } - if (!hp->registers) { - printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n"); - - return -ENODEV; - } - - /* - * If the IOTLB is currently disabled, we can take it over. - * Otherwise, we have to share with sba_iommu. 
- */ - hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; - - if (hp->io_pdir_owner) - return hp_zx1_ioc_owner(ioc_rev); - - return hp_zx1_ioc_shared(); -} - -static int hp_zx1_fetch_size(void) -{ - int size; - - size = hp_private.gart_size / MB(1); - hp_zx1_sizes[0].size = size; - agp_bridge.current_size = (void *) &hp_zx1_sizes[0]; - return size; -} - -static int hp_zx1_configure(void) -{ - struct _hp_private *hp = &hp_private; - - agp_bridge.gart_bus_addr = hp->gart_base; - agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP); - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); - - if (hp->io_pdir_owner) { - OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, - virt_to_phys(hp->io_pdir)); - OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps); - OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); - OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1); - OUTREG64(hp->registers, HP_ZX1_PCOM, - hp->iova_base | log2(HP_ZX1_IOVA_SIZE)); - INREG64(hp->registers, HP_ZX1_PCOM); - } - - return 0; -} - -static void hp_zx1_cleanup(void) -{ - struct _hp_private *hp = &hp_private; - - if (hp->io_pdir_owner) - OUTREG64(hp->registers, HP_ZX1_IBASE, 0); - iounmap((void *) hp->registers); -} - -static void hp_zx1_tlbflush(agp_memory * mem) -{ - struct _hp_private *hp = &hp_private; - - OUTREG64(hp->registers, HP_ZX1_PCOM, - hp->gart_base | log2(hp->gart_size)); - INREG64(hp->registers, HP_ZX1_PCOM); -} - -static int hp_zx1_create_gatt_table(void) -{ - struct _hp_private *hp = &hp_private; - int i; - - if (hp->io_pdir_owner) { - hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, - get_order(hp->io_pdir_size)); - if (!hp->io_pdir) { - printk(KERN_ERR PFX "Couldn't allocate contiguous " - "memory for I/O PDIR\n"); - hp->gatt = 0; - hp->gatt_entries = 0; - return -ENOMEM; - } - memset(hp->io_pdir, 0, hp->io_pdir_size); - - hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; - } - - 
for (i = 0; i < hp->gatt_entries; i++) { - hp->gatt[i] = (unsigned long) agp_bridge.scratch_page; - } - - return 0; -} - -static int hp_zx1_free_gatt_table(void) -{ - struct _hp_private *hp = &hp_private; - - if (hp->io_pdir_owner) - free_pages((unsigned long) hp->io_pdir, - get_order(hp->io_pdir_size)); - else - hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; - return 0; -} - -static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, k; - off_t j, io_pg_start; - int io_pg_count; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - if ((io_pg_start + io_pg_count) > hp->gatt_entries) { - return -EINVAL; - } - - j = io_pg_start; - while (j < (io_pg_start + io_pg_count)) { - if (hp->gatt[j]) { - return -EBUSY; - } - j++; - } - - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - - for (i = 0, j = io_pg_start; i < mem->page_count; i++) { - unsigned long paddr; - - paddr = mem->memory[i]; - for (k = 0; - k < hp->io_pages_per_kpage; - k++, j++, paddr += hp->io_page_size) { - hp->gatt[j] = agp_bridge.mask_memory(paddr, type); - } - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type) -{ - struct _hp_private *hp = &hp_private; - int i, io_pg_start, io_pg_count; - - if (type != 0 || mem->type != 0) { - return -EINVAL; - } - - io_pg_start = hp->io_pages_per_kpage * pg_start; - io_pg_count = hp->io_pages_per_kpage * mem->page_count; - for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { - hp->gatt[i] = agp_bridge.scratch_page; - } - - agp_bridge.tlb_flush(mem); - return 0; -} - -static unsigned long hp_zx1_mask_memory(unsigned long addr, int type) -{ - return HP_ZX1_PDIR_VALID_BIT | addr; -} - -static unsigned long hp_zx1_unmask_memory(unsigned long addr) -{ - return addr & 
~(HP_ZX1_PDIR_VALID_BIT); -} - -static int __init hp_zx1_setup (struct pci_dev *pdev) -{ - agp_bridge.masks = hp_zx1_masks; - agp_bridge.num_of_masks = 1; - agp_bridge.dev_private_data = NULL; - agp_bridge.size_type = FIXED_APER_SIZE; - agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = hp_zx1_configure; - agp_bridge.fetch_size = hp_zx1_fetch_size; - agp_bridge.cleanup = hp_zx1_cleanup; - agp_bridge.tlb_flush = hp_zx1_tlbflush; - agp_bridge.mask_memory = hp_zx1_mask_memory; - agp_bridge.unmask_memory = hp_zx1_unmask_memory; - agp_bridge.agp_enable = agp_generic_agp_enable; - agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; - agp_bridge.free_gatt_table = hp_zx1_free_gatt_table; - agp_bridge.insert_memory = hp_zx1_insert_memory; - agp_bridge.remove_memory = hp_zx1_remove_memory; - agp_bridge.alloc_by_type = agp_generic_alloc_by_type; - agp_bridge.free_by_type = agp_generic_free_by_type; - agp_bridge.agp_alloc_page = agp_generic_alloc_page; - agp_bridge.agp_destroy_page = agp_generic_destroy_page; - agp_bridge.cant_use_aperture = 1; - - return hp_zx1_ioc_init(); - - (void) pdev; /* unused */ -} - -#endif /* CONFIG_AGP_HP_ZX1 */ - -/* per-chipset initialization data. 
- * note -- all chipsets for a single vendor MUST be grouped together - */ -static struct { - unsigned short device_id; /* first, to make table easier to read */ - unsigned short vendor_id; - enum chipset_type chipset; - const char *vendor_name; - const char *chipset_name; - int (*chipset_setup) (struct pci_dev *pdev); -} agp_bridge_info[] __initdata = { - -#ifdef CONFIG_AGP_ALI - { PCI_DEVICE_ID_AL_M1541_0, - PCI_VENDOR_ID_AL, - ALI_M1541, - "Ali", - "M1541", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1621_0, - PCI_VENDOR_ID_AL, - ALI_M1621, - "Ali", - "M1621", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1631_0, - PCI_VENDOR_ID_AL, - ALI_M1631, - "Ali", - "M1631", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1632_0, - PCI_VENDOR_ID_AL, - ALI_M1632, - "Ali", - "M1632", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1641_0, - PCI_VENDOR_ID_AL, - ALI_M1641, - "Ali", - "M1641", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1644_0, - PCI_VENDOR_ID_AL, - ALI_M1644, - "Ali", - "M1644", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1647_0, - PCI_VENDOR_ID_AL, - ALI_M1647, - "Ali", - "M1647", - ali_generic_setup }, - { PCI_DEVICE_ID_AL_M1651_0, - PCI_VENDOR_ID_AL, - ALI_M1651, - "Ali", - "M1651", - ali_generic_setup }, - { 0, - PCI_VENDOR_ID_AL, - ALI_GENERIC, - "Ali", - "Generic", - ali_generic_setup }, -#endif /* CONFIG_AGP_ALI */ - -#ifdef CONFIG_AGP_AMD - { PCI_DEVICE_ID_AMD_IRONGATE_0, - PCI_VENDOR_ID_AMD, - AMD_IRONGATE, - "AMD", - "Irongate", - amd_irongate_setup }, - { PCI_DEVICE_ID_AMD_761_0, - PCI_VENDOR_ID_AMD, - AMD_761, - "AMD", - "761", - amd_irongate_setup }, - { PCI_DEVICE_ID_AMD_762_0, - PCI_VENDOR_ID_AMD, - AMD_762, - "AMD", - "760MP", - amd_irongate_setup }, - { 0, - PCI_VENDOR_ID_AMD, - AMD_GENERIC, - "AMD", - "Generic", - amd_irongate_setup }, -#endif /* CONFIG_AGP_AMD */ - -#ifdef CONFIG_AGP_INTEL - { PCI_DEVICE_ID_INTEL_82443LX_0, - PCI_VENDOR_ID_INTEL, - INTEL_LX, - "Intel", - "440LX", - intel_generic_setup }, - { PCI_DEVICE_ID_INTEL_82443BX_0, - 
PCI_VENDOR_ID_INTEL, - INTEL_BX, - "Intel", - "440BX", - intel_generic_setup }, - { PCI_DEVICE_ID_INTEL_82443GX_0, - PCI_VENDOR_ID_INTEL, - INTEL_GX, - "Intel", - "440GX", - intel_generic_setup }, - { PCI_DEVICE_ID_INTEL_815_0, - PCI_VENDOR_ID_INTEL, - INTEL_I815, - "Intel", - "i815", - intel_generic_setup }, - { PCI_DEVICE_ID_INTEL_820_0, - PCI_VENDOR_ID_INTEL, - INTEL_I820, - "Intel", - "i820", - intel_820_setup }, - { PCI_DEVICE_ID_INTEL_820_UP_0, - PCI_VENDOR_ID_INTEL, - INTEL_I820, - "Intel", - "i820", - intel_820_setup }, - { PCI_DEVICE_ID_INTEL_830_M_0, - PCI_VENDOR_ID_INTEL, - INTEL_I830_M, - "Intel", - "i830M", - intel_830mp_setup }, - { PCI_DEVICE_ID_INTEL_845_G_0, - PCI_VENDOR_ID_INTEL, - INTEL_I845_G, - "Intel", - "i845G", - intel_830mp_setup }, - { PCI_DEVICE_ID_INTEL_840_0, - PCI_VENDOR_ID_INTEL, - INTEL_I840, - "Intel", - "i840", - intel_840_setup }, - { PCI_DEVICE_ID_INTEL_845_0, - PCI_VENDOR_ID_INTEL, - INTEL_I845, - "Intel", - "i845", - intel_845_setup }, - { PCI_DEVICE_ID_INTEL_850_0, - PCI_VENDOR_ID_INTEL, - INTEL_I850, - "Intel", - "i850", -intel_850_setup }, - { PCI_DEVICE_ID_INTEL_860_0, - PCI_VENDOR_ID_INTEL, - INTEL_I860, - "Intel", - "i860", - intel_860_setup }, - { 0, - PCI_VENDOR_ID_INTEL, - INTEL_GENERIC, - "Intel", - "Generic", - intel_generic_setup }, - -#endif /* CONFIG_AGP_INTEL */ - -#ifdef CONFIG_AGP_SIS - { PCI_DEVICE_ID_SI_740, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "740", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_650, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "650", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_645, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "645", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_735, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "735", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_730, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "730", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_630, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "630", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_540, 
- PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "540", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_620, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "620", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_530, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "530", - sis_generic_setup }, - { PCI_DEVICE_ID_SI_550, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "550", - sis_generic_setup }, - { 0, - PCI_VENDOR_ID_SI, - SIS_GENERIC, - "SiS", - "Generic", - sis_generic_setup }, -#endif /* CONFIG_AGP_SIS */ - -#ifdef CONFIG_AGP_VIA - { PCI_DEVICE_ID_VIA_8501_0, - PCI_VENDOR_ID_VIA, - VIA_MVP4, - "Via", - "MVP4", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_82C597_0, - PCI_VENDOR_ID_VIA, - VIA_VP3, - "Via", - "VP3", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_82C598_0, - PCI_VENDOR_ID_VIA, - VIA_MVP3, - "Via", - "MVP3", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_82C691_0, - PCI_VENDOR_ID_VIA, - VIA_APOLLO_PRO, - "Via", - "Apollo Pro", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_8371_0, - PCI_VENDOR_ID_VIA, - VIA_APOLLO_KX133, - "Via", - "Apollo Pro KX133", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_8363_0, - PCI_VENDOR_ID_VIA, - VIA_APOLLO_KT133, - "Via", - "Apollo Pro KT133", - via_generic_setup }, - { PCI_DEVICE_ID_VIA_8367_0, - PCI_VENDOR_ID_VIA, - VIA_APOLLO_KT133, - "Via", - "Apollo Pro KT266", - via_generic_setup }, - { 0, - PCI_VENDOR_ID_VIA, - VIA_GENERIC, - "Via", - "Generic", - via_generic_setup }, -#endif /* CONFIG_AGP_VIA */ - -#ifdef CONFIG_AGP_HP_ZX1 - { PCI_DEVICE_ID_HP_ZX1_LBA, - PCI_VENDOR_ID_HP, - HP_ZX1, - "HP", - "ZX1", - hp_zx1_setup }, -#endif - - { 0, }, /* dummy final entry, always present */ -}; - - -/* scan table above for supported devices */ -static int __init agp_lookup_host_bridge (struct pci_dev *pdev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) - if (pdev->vendor == agp_bridge_info[i].vendor_id) - break; - - if (i >= ARRAY_SIZE (agp_bridge_info)) { - printk (KERN_DEBUG PFX "unsupported bridge\n"); - return -ENODEV; - } - 
- while ((i < ARRAY_SIZE (agp_bridge_info)) && - (agp_bridge_info[i].vendor_id == pdev->vendor)) { - if (pdev->device == agp_bridge_info[i].device_id) { -#ifdef CONFIG_AGP_ALI - if (pdev->device == PCI_DEVICE_ID_AL_M1621_0) { - u8 hidden_1621_id; - - pci_read_config_byte(pdev, 0xFB, &hidden_1621_id); - switch (hidden_1621_id) { - case 0x31: - agp_bridge_info[i].chipset_name="M1631"; - break; - case 0x32: - agp_bridge_info[i].chipset_name="M1632"; - break; - case 0x41: - agp_bridge_info[i].chipset_name="M1641"; - break; - case 0x43: - break; - case 0x47: - agp_bridge_info[i].chipset_name="M1647"; - break; - case 0x51: - agp_bridge_info[i].chipset_name="M1651"; - break; - default: - break; - } - } -#endif - - printk (KERN_INFO PFX "Detected %s %s chipset\n", - agp_bridge_info[i].vendor_name, - agp_bridge_info[i].chipset_name); - agp_bridge.type = agp_bridge_info[i].chipset; - return agp_bridge_info[i].chipset_setup (pdev); - } - - i++; - } - - i--; /* point to vendor generic entry (device_id == 0) */ - - /* try init anyway, if user requests it AND - * there is a 'generic' bridge entry for this vendor */ - if (agp_try_unsupported && agp_bridge_info[i].device_id == 0) { - printk(KERN_WARNING PFX "Trying generic %s routines" - " for device id: %04x\n", - agp_bridge_info[i].vendor_name, pdev->device); - agp_bridge.type = agp_bridge_info[i].chipset; - return agp_bridge_info[i].chipset_setup (pdev); - } - - printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x)," - " you might want to try agp_try_unsupported=1.\n", - agp_bridge_info[i].vendor_name, pdev->device); - return -ENODEV; -} - - -/* Supported Device Scanning routine */ - -static int __init agp_find_supported_device(void) -{ - struct pci_dev *dev = NULL; - u8 cap_ptr = 0x00; - - if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) - return -ENODEV; - - agp_bridge.dev = dev; - - /* Need to test for I810 here */ -#ifdef CONFIG_AGP_I810 - if (dev->vendor == PCI_VENDOR_ID_INTEL) { - struct 
pci_dev *i810_dev; - - switch (dev->device) { - case PCI_DEVICE_ID_INTEL_810_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_1, - NULL); - if (i810_dev == NULL) { - printk(KERN_ERR PFX "Detected an Intel i810," - " but could not find the secondary" - " device.\n"); - return -ENODEV; - } - printk(KERN_INFO PFX "Detected an Intel " - "i810 Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i810_setup (i810_dev); - - case PCI_DEVICE_ID_INTEL_810_DC100_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_DC100_1, - NULL); - if (i810_dev == NULL) { - printk(KERN_ERR PFX "Detected an Intel i810 " - "DC100, but could not find the " - "secondary device.\n"); - return -ENODEV; - } - printk(KERN_INFO PFX "Detected an Intel i810 " - "DC100 Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i810_setup(i810_dev); - - case PCI_DEVICE_ID_INTEL_810_E_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_810_E_1, - NULL); - if (i810_dev == NULL) { - printk(KERN_ERR PFX "Detected an Intel i810 E" - ", but could not find the secondary " - "device.\n"); - return -ENODEV; - } - printk(KERN_INFO PFX "Detected an Intel i810 E " - "Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i810_setup(i810_dev); - - case PCI_DEVICE_ID_INTEL_815_0: - /* The i815 can operate either as an i810 style - * integrated device, or as an AGP4X motherboard. - * - * This only addresses the first mode: - */ - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_815_1, - NULL); - if (i810_dev == NULL) { - printk(KERN_ERR PFX "agpgart: Detected an " - "Intel i815, but could not find the" - " secondary device. 
Assuming a " - "non-integrated video card.\n"); - break; - } - printk(KERN_INFO PFX "agpgart: Detected an Intel i815 " - "Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i810_setup(i810_dev); - - case PCI_DEVICE_ID_INTEL_845_G_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_845_G_1, NULL); - if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_845_G_1, i810_dev); - } - - if (i810_dev == NULL) { - /* - * We probably have a I845MP chipset - * with an external graphics - * card. It will be initialized later - */ - agp_bridge.type = INTEL_I845_G; - break; - } - printk(KERN_INFO PFX "Detected an Intel " - "845G Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i830_setup(i810_dev); - - case PCI_DEVICE_ID_INTEL_830_M_0: - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_830_M_1, - NULL); - if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) { - i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_830_M_1, - i810_dev); - } - - if (i810_dev == NULL) { - /* Intel 830MP with external graphic card */ - /* It will be initialized later */ - agp_bridge.type = INTEL_I830_M; - break; - } - printk(KERN_INFO PFX "Detected an Intel " - "830M Chipset.\n"); - agp_bridge.type = INTEL_I810; - return intel_i830_setup(i810_dev); - default: - break; - } - } -#endif /* CONFIG_AGP_I810 */ - -#ifdef CONFIG_AGP_SWORKS - /* Everything is on func 1 here so we are hardcoding function one */ - if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) { - struct pci_dev *bridge_dev; - - bridge_dev = pci_find_slot ((unsigned int)dev->bus->number, - PCI_DEVFN(0, 1)); - if(bridge_dev == NULL) { - printk(KERN_INFO PFX "agpgart: Detected a Serverworks " - "Chipset, but could not find the secondary " - "device.\n"); - return -ENODEV; - } - - switch (dev->device) { - case PCI_DEVICE_ID_SERVERWORKS_HE: - agp_bridge.type = SVWRKS_HE; - return 
serverworks_setup(bridge_dev); - - case PCI_DEVICE_ID_SERVERWORKS_LE: - case 0x0007: - agp_bridge.type = SVWRKS_LE; - return serverworks_setup(bridge_dev); - - default: - if(agp_try_unsupported) { - agp_bridge.type = SVWRKS_GENERIC; - return serverworks_setup(bridge_dev); - } - break; - } - } - -#endif /* CONFIG_AGP_SWORKS */ - -#ifdef CONFIG_AGP_HP_ZX1 - if (dev->vendor == PCI_VENDOR_ID_HP) { - do { - /* ZX1 LBAs can be either PCI or AGP bridges */ - if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { - printk(KERN_INFO PFX "Detected HP ZX1 AGP " - "chipset at %s\n", dev->slot_name); - agp_bridge.type = HP_ZX1; - agp_bridge.dev = dev; - return hp_zx1_setup(dev); - } - dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev); - } while (dev); - return -ENODEV; - } -#endif /* CONFIG_AGP_HP_ZX1 */ - - /* find capndx */ - cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP); - if (cap_ptr == 0x00) - return -ENODEV; - agp_bridge.capndx = cap_ptr; - - /* Fill in the mode register */ - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + 4, - &agp_bridge.mode); - - /* probe for known chipsets */ - return agp_lookup_host_bridge (dev); -} - -struct agp_max_table { - int mem; - int agp; -}; - -static struct agp_max_table maxes_table[9] __initdata = -{ - {0, 0}, - {32, 4}, - {64, 28}, - {128, 96}, - {256, 204}, - {512, 440}, - {1024, 942}, - {2048, 1920}, - {4096, 3932} -}; - -static int __init agp_find_max (void) -{ - long memory, index, result; - - memory = virt_to_phys(high_memory) >> 20; - index = 1; - - while ((memory > maxes_table[index].mem) && - (index < 8)) { - index++; - } - - result = maxes_table[index - 1].agp + - ( (memory - maxes_table[index - 1].mem) * - (maxes_table[index].agp - maxes_table[index - 1].agp)) / - (maxes_table[index].mem - maxes_table[index - 1].mem); - - printk(KERN_INFO PFX "Maximum main memory to use " - "for agp memory: %ldM\n", result); - result = result << (20 - PAGE_SHIFT); - return result; -} - -#define AGPGART_VERSION_MAJOR 0 -#define 
AGPGART_VERSION_MINOR 99 - -static agp_version agp_current_version = -{ - AGPGART_VERSION_MAJOR, - AGPGART_VERSION_MINOR -}; - -static int __init agp_backend_initialize(void) -{ - int size_value, rc, got_gatt=0, got_keylist=0; - - memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); - agp_bridge.type = NOT_SUPPORTED; - agp_bridge.max_memory_agp = agp_find_max(); - agp_bridge.version = &agp_current_version; - - rc = agp_find_supported_device(); - if (rc) { - /* not KERN_ERR because error msg should have already printed */ - printk(KERN_DEBUG PFX "no supported devices found.\n"); - return rc; - } - - if (agp_bridge.needs_scratch_page == TRUE) { - void *addr; - addr = agp_bridge.agp_alloc_page(); - - if (addr == NULL) { - printk(KERN_ERR PFX "unable to get memory for " - "scratch page.\n"); - return -ENOMEM; - } - agp_bridge.scratch_page = virt_to_phys(addr); - agp_bridge.scratch_page = - agp_bridge.mask_memory(agp_bridge.scratch_page, 0); - } - - size_value = agp_bridge.fetch_size(); - - if (size_value == 0) { - printk(KERN_ERR PFX "unable to determine aperture size.\n"); - rc = -EINVAL; - goto err_out; - } - if (agp_bridge.create_gatt_table()) { - printk(KERN_ERR PFX "unable to get memory for graphics " - "translation table.\n"); - rc = -ENOMEM; - goto err_out; - } - got_gatt = 1; - - agp_bridge.key_list = vmalloc(PAGE_SIZE * 4); - if (agp_bridge.key_list == NULL) { - printk(KERN_ERR PFX "error allocating memory for key lists.\n"); - rc = -ENOMEM; - goto err_out; - } - got_keylist = 1; - - /* FIXME vmalloc'd memory not guaranteed contiguous */ - memset(agp_bridge.key_list, 0, PAGE_SIZE * 4); - - if (agp_bridge.configure()) { - printk(KERN_ERR PFX "error configuring host chipset.\n"); - rc = -EINVAL; - goto err_out; - } - - printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", - size_value, agp_bridge.gart_bus_addr); - - return 0; - -err_out: - if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page &= ~(0x00000fff); - 
agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); - } - if (got_gatt) - agp_bridge.free_gatt_table(); - if (got_keylist) - vfree(agp_bridge.key_list); - return rc; -} - - -/* cannot be __exit b/c as it could be called from __init code */ -static void agp_backend_cleanup(void) -{ - agp_bridge.cleanup(); - agp_bridge.free_gatt_table(); - vfree(agp_bridge.key_list); - - if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page &= ~(0x00000fff); - agp_bridge.agp_destroy_page(phys_to_virt(agp_bridge.scratch_page)); - } -} - -static int agp_power(struct pm_dev *dev, pm_request_t rq, void *data) -{ - switch(rq) - { - case PM_SUSPEND: - return agp_bridge.suspend(); - case PM_RESUME: - agp_bridge.resume(); - return 0; - } - return 0; -} - -extern int agp_frontend_initialize(void); -extern void agp_frontend_cleanup(void); - -static const drm_agp_t drm_agp = { - &agp_free_memory, - &agp_allocate_memory, - &agp_bind_memory, - &agp_unbind_memory, - &agp_enable, - &agp_backend_acquire, - &agp_backend_release, - &agp_copy_info -}; - -static int __init agp_init(void) -{ - int ret_val; - - printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n", - AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); - - ret_val = agp_backend_initialize(); - if (ret_val) { - agp_bridge.type = NOT_SUPPORTED; - return ret_val; - } - ret_val = agp_frontend_initialize(); - if (ret_val) { - agp_bridge.type = NOT_SUPPORTED; - agp_backend_cleanup(); - return ret_val; - } - - inter_module_register("drm_agp", THIS_MODULE, &drm_agp); - - pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power); - return 0; -} - -static void __exit agp_cleanup(void) -{ - pm_unregister_all(agp_power); - agp_frontend_cleanup(); - agp_backend_cleanup(); - inter_module_unregister("drm_agp"); -} - -module_init(agp_init); -module_exit(agp_cleanup); diff -Nru a/drivers/char/agp/agpgart_fe.c b/drivers/char/agp/agpgart_fe.c --- a/drivers/char/agp/agpgart_fe.c Sat Jul 20 12:12:34 2002 +++ 
/dev/null Wed Dec 31 16:00:00 1969 @@ -1,1099 +0,0 @@ -/* - * AGPGART module frontend version 0.99 - * Copyright (C) 1999 Jeff Hartmann - * Copyright (C) 1999 Precision Insight, Inc. - * Copyright (C) 1999 Xi Graphics, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE - * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#define __NO_VERSION__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "agp.h" - -static struct agp_front_data agp_fe; - -static agp_memory *agp_find_mem_by_key(int key) -{ - agp_memory *curr; - - if (agp_fe.current_controller == NULL) { - return NULL; - } - curr = agp_fe.current_controller->pool; - - while (curr != NULL) { - if (curr->key == key) { - return curr; - } - curr = curr->next; - } - - return NULL; -} - -static void agp_remove_from_pool(agp_memory * temp) -{ - agp_memory *prev; - agp_memory *next; - - /* Check to see if this is even in the memory pool */ - - if (agp_find_mem_by_key(temp->key) != NULL) { - next = temp->next; - prev = temp->prev; - - if (prev != NULL) { - prev->next = next; - if (next != NULL) { - next->prev = prev; - } - } else { - /* This is the first item on the list */ - if (next != NULL) { - next->prev = NULL; - } - agp_fe.current_controller->pool = next; - } - } -} - -/* - * Routines for managing each client's segment list - - * These routines handle adding and removing segments - * to each auth'ed client. 
- */ - -static agp_segment_priv *agp_find_seg_in_client(const agp_client * client, - unsigned long offset, - int size, pgprot_t page_prot) -{ - agp_segment_priv *seg; - int num_segments, pg_start, pg_count, i; - - pg_start = offset / 4096; - pg_count = size / 4096; - seg = *(client->segments); - num_segments = client->num_segments; - - for (i = 0; i < client->num_segments; i++) { - if ((seg[i].pg_start == pg_start) && - (seg[i].pg_count == pg_count) && - (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { - return seg + i; - } - } - - return NULL; -} - -static void agp_remove_seg_from_client(agp_client * client) -{ - if (client->segments != NULL) { - if (*(client->segments) != NULL) { - kfree(*(client->segments)); - } - kfree(client->segments); - } -} - -static void agp_add_seg_to_client(agp_client * client, - agp_segment_priv ** seg, int num_segments) -{ - agp_segment_priv **prev_seg; - - prev_seg = client->segments; - - if (prev_seg != NULL) { - agp_remove_seg_from_client(client); - } - client->num_segments = num_segments; - client->segments = seg; -} - -/* Originally taken from linux/mm/mmap.c from the array - * protection_map. 
- * The original really should be exported to modules, or - * some routine which does the conversion for you - */ - -static const pgprot_t my_protect_map[16] = -{ - __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, - __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 -}; - -static pgprot_t agp_convert_mmap_flags(int prot) -{ -#define _trans(x,bit1,bit2) \ -((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0) - - unsigned long prot_bits; - pgprot_t temp; - - prot_bits = _trans(prot, PROT_READ, VM_READ) | - _trans(prot, PROT_WRITE, VM_WRITE) | - _trans(prot, PROT_EXEC, VM_EXEC); - - prot_bits |= VM_SHARED; - - temp = my_protect_map[prot_bits & 0x0000000f]; - - return temp; -} - -static int agp_create_segment(agp_client * client, agp_region * region) -{ - agp_segment_priv **ret_seg; - agp_segment_priv *seg; - agp_segment *user_seg; - int i; - - seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), - GFP_KERNEL); - if (seg == NULL) { - kfree(region->seg_list); - return -ENOMEM; - } - memset(seg, 0, (sizeof(agp_segment_priv) * region->seg_count)); - user_seg = region->seg_list; - - for (i = 0; i < region->seg_count; i++) { - seg[i].pg_start = user_seg[i].pg_start; - seg[i].pg_count = user_seg[i].pg_count; - seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); - } - ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); - if (ret_seg == NULL) { - kfree(region->seg_list); - kfree(seg); - return -ENOMEM; - } - *ret_seg = seg; - kfree(region->seg_list); - agp_add_seg_to_client(client, ret_seg, region->seg_count); - return 0; -} - -/* End - Routines for managing each client's segment list */ - -/* This function must only be called when current_controller != NULL */ -static void agp_insert_into_pool(agp_memory * temp) -{ - agp_memory *prev; - - prev = agp_fe.current_controller->pool; - - if (prev != NULL) { - prev->prev = temp; - temp->next = prev; - } - agp_fe.current_controller->pool = temp; -} - - -/* File private list routines */ - -agp_file_private 
*agp_find_private(pid_t pid) -{ - agp_file_private *curr; - - curr = agp_fe.file_priv_list; - - while (curr != NULL) { - if (curr->my_pid == pid) { - return curr; - } - curr = curr->next; - } - - return NULL; -} - -void agp_insert_file_private(agp_file_private * priv) -{ - agp_file_private *prev; - - prev = agp_fe.file_priv_list; - - if (prev != NULL) { - prev->prev = priv; - } - priv->next = prev; - agp_fe.file_priv_list = priv; -} - -void agp_remove_file_private(agp_file_private * priv) -{ - agp_file_private *next; - agp_file_private *prev; - - next = priv->next; - prev = priv->prev; - - if (prev != NULL) { - prev->next = next; - - if (next != NULL) { - next->prev = prev; - } - } else { - if (next != NULL) { - next->prev = NULL; - } - agp_fe.file_priv_list = next; - } -} - -/* End - File flag list routines */ - -/* - * Wrappers for agp_free_memory & agp_allocate_memory - * These make sure that internal lists are kept updated. - */ -static void agp_free_memory_wrap(agp_memory * memory) -{ - agp_remove_from_pool(memory); - agp_free_memory(memory); -} - -static agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) -{ - agp_memory *memory; - - memory = agp_allocate_memory(pg_count, type); - printk(KERN_DEBUG "agp_allocate_memory: %p\n", memory); - if (memory == NULL) { - return NULL; - } - agp_insert_into_pool(memory); - return memory; -} - -/* Routines for managing the list of controllers - - * These routines manage the current controller, and the list of - * controllers - */ - -static agp_controller *agp_find_controller_by_pid(pid_t id) -{ - agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if (controller->pid == id) { - return controller; - } - controller = controller->next; - } - - return NULL; -} - -static agp_controller *agp_create_controller(pid_t id) -{ - agp_controller *controller; - - controller = kmalloc(sizeof(agp_controller), GFP_KERNEL); - - if (controller == NULL) { - return NULL; - } - 
memset(controller, 0, sizeof(agp_controller)); - controller->pid = id; - - return controller; -} - -static int agp_insert_controller(agp_controller * controller) -{ - agp_controller *prev_controller; - - prev_controller = agp_fe.controllers; - controller->next = prev_controller; - - if (prev_controller != NULL) { - prev_controller->prev = controller; - } - agp_fe.controllers = controller; - - return 0; -} - -static void agp_remove_all_clients(agp_controller * controller) -{ - agp_client *client; - agp_client *temp; - - client = controller->clients; - - while (client) { - agp_file_private *priv; - - temp = client; - agp_remove_seg_from_client(temp); - priv = agp_find_private(temp->pid); - - if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - client = client->next; - kfree(temp); - } -} - -static void agp_remove_all_memory(agp_controller * controller) -{ - agp_memory *memory; - agp_memory *temp; - - memory = controller->pool; - - while (memory) { - temp = memory; - memory = memory->next; - agp_free_memory_wrap(temp); - } -} - -static int agp_remove_controller(agp_controller * controller) -{ - agp_controller *prev_controller; - agp_controller *next_controller; - - prev_controller = controller->prev; - next_controller = controller->next; - - if (prev_controller != NULL) { - prev_controller->next = next_controller; - if (next_controller != NULL) { - next_controller->prev = prev_controller; - } - } else { - if (next_controller != NULL) { - next_controller->prev = NULL; - } - agp_fe.controllers = next_controller; - } - - agp_remove_all_memory(controller); - agp_remove_all_clients(controller); - - if (agp_fe.current_controller == controller) { - agp_fe.current_controller = NULL; - agp_fe.backend_acquired = FALSE; - agp_backend_release(); - } - kfree(controller); - return 0; -} - -static void agp_controller_make_current(agp_controller * controller) -{ - agp_client *clients; - - clients = 
controller->clients; - - while (clients != NULL) { - agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) { - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - } - clients = clients->next; - } - - agp_fe.current_controller = controller; -} - -static void agp_controller_release_current(agp_controller * controller, - agp_file_private * controller_priv) -{ - agp_client *clients; - - clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); - clients = controller->clients; - - while (clients != NULL) { - agp_file_private *priv; - - priv = agp_find_private(clients->pid); - - if (priv != NULL) { - clear_bit(AGP_FF_IS_VALID, &priv->access_flags); - } - clients = clients->next; - } - - agp_fe.current_controller = NULL; - agp_fe.used_by_controller = FALSE; - agp_backend_release(); -} - -/* - * Routines for managing client lists - - * These routines are for managing the list of auth'ed clients. - */ - -static agp_client *agp_find_client_in_controller(agp_controller * controller, - pid_t id) -{ - agp_client *client; - - if (controller == NULL) { - return NULL; - } - client = controller->clients; - - while (client != NULL) { - if (client->pid == id) { - return client; - } - client = client->next; - } - - return NULL; -} - -static agp_controller *agp_find_controller_for_client(pid_t id) -{ - agp_controller *controller; - - controller = agp_fe.controllers; - - while (controller != NULL) { - if ((agp_find_client_in_controller(controller, id)) != NULL) { - return controller; - } - controller = controller->next; - } - - return NULL; -} - -static agp_client *agp_find_client_by_pid(pid_t id) -{ - agp_client *temp; - - if (agp_fe.current_controller == NULL) { - return NULL; - } - temp = agp_find_client_in_controller(agp_fe.current_controller, id); - return temp; -} - -static void agp_insert_client(agp_client * client) -{ - agp_client *prev_client; - - prev_client = 
agp_fe.current_controller->clients; - client->next = prev_client; - - if (prev_client != NULL) { - prev_client->prev = client; - } - agp_fe.current_controller->clients = client; - agp_fe.current_controller->num_clients++; -} - -static agp_client *agp_create_client(pid_t id) -{ - agp_client *new_client; - - new_client = kmalloc(sizeof(agp_client), GFP_KERNEL); - - if (new_client == NULL) { - return NULL; - } - memset(new_client, 0, sizeof(agp_client)); - new_client->pid = id; - agp_insert_client(new_client); - return new_client; -} - -static int agp_remove_client(pid_t id) -{ - agp_client *client; - agp_client *prev_client; - agp_client *next_client; - agp_controller *controller; - - controller = agp_find_controller_for_client(id); - - if (controller == NULL) { - return -EINVAL; - } - client = agp_find_client_in_controller(controller, id); - - if (client == NULL) { - return -EINVAL; - } - prev_client = client->prev; - next_client = client->next; - - if (prev_client != NULL) { - prev_client->next = next_client; - if (next_client != NULL) { - next_client->prev = prev_client; - } - } else { - if (next_client != NULL) { - next_client->prev = NULL; - } - controller->clients = next_client; - } - - controller->num_clients--; - agp_remove_seg_from_client(client); - kfree(client); - return 0; -} - -/* End - Routines for managing client lists */ - -/* File Operations */ - -static int agp_mmap(struct file *file, struct vm_area_struct *vma) -{ - int size; - int current_size; - unsigned long offset; - agp_client *client; - agp_file_private *priv = (agp_file_private *) file->private_data; - agp_kern_info kerninfo; - - AGP_LOCK(); - - if (agp_fe.backend_acquired != TRUE) { - AGP_UNLOCK(); - return -EPERM; - } - if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) { - AGP_UNLOCK(); - return -EPERM; - } - agp_copy_info(&kerninfo); - size = vma->vm_end - vma->vm_start; - current_size = kerninfo.aper_size; - current_size = current_size * 0x100000; - offset = vma->vm_pgoff << 
PAGE_SHIFT; - - if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { - if ((size + offset) > current_size) { - AGP_UNLOCK(); - return -EINVAL; - } - client = agp_find_client_by_pid(current->pid); - - if (client == NULL) { - AGP_UNLOCK(); - return -EPERM; - } - if (!agp_find_seg_in_client(client, offset, - size, vma->vm_page_prot)) { - AGP_UNLOCK(); - return -EINVAL; - } - if (remap_page_range(vma, vma->vm_start, - (kerninfo.aper_base + offset), - size, vma->vm_page_prot)) { - AGP_UNLOCK(); - return -EAGAIN; - } - AGP_UNLOCK(); - return 0; - } - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - if (size != current_size) { - AGP_UNLOCK(); - return -EINVAL; - } - if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base, - size, vma->vm_page_prot)) { - AGP_UNLOCK(); - return -EAGAIN; - } - AGP_UNLOCK(); - return 0; - } - AGP_UNLOCK(); - return -EPERM; -} - -static int agp_release(struct inode *inode, struct file *file) -{ - agp_file_private *priv = (agp_file_private *) file->private_data; - - AGP_LOCK(); - - if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { - agp_controller *controller; - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - if (controller == agp_fe.current_controller) { - agp_controller_release_current(controller, - priv); - } - agp_remove_controller(controller); - } - } - if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { - agp_remove_client(priv->my_pid); - } - agp_remove_file_private(priv); - kfree(priv); - AGP_UNLOCK(); - return 0; -} - -static int agp_open(struct inode *inode, struct file *file) -{ - int minor = minor(inode->i_rdev); - agp_file_private *priv; - agp_client *client; - int rc = -ENXIO; - - AGP_LOCK(); - - if (minor != AGPGART_MINOR) - goto err_out; - - priv = kmalloc(sizeof(agp_file_private), GFP_KERNEL); - if (priv == NULL) - goto err_out_nomem; - - memset(priv, 0, sizeof(agp_file_private)); - set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); - priv->my_pid = 
current->pid; - - if ((current->uid == 0) || (current->suid == 0)) { - /* Root priv, can be controller */ - set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); - } - client = agp_find_client_by_pid(current->pid); - - if (client != NULL) { - set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - } - file->private_data = (void *) priv; - agp_insert_file_private(priv); - AGP_UNLOCK(); - return 0; - -err_out_nomem: - rc = -ENOMEM; -err_out: - AGP_UNLOCK(); - return rc; -} - - -static ssize_t agp_read(struct file *file, char *buf, - size_t count, loff_t * ppos) -{ - return -EINVAL; -} - -static ssize_t agp_write(struct file *file, const char *buf, - size_t count, loff_t * ppos) -{ - return -EINVAL; -} - -static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_info userinfo; - agp_kern_info kerninfo; - - agp_copy_info(&kerninfo); - - userinfo.version.major = kerninfo.version.major; - userinfo.version.minor = kerninfo.version.minor; - userinfo.bridge_id = kerninfo.device->vendor | - (kerninfo.device->device << 16); - userinfo.agp_mode = kerninfo.mode; - userinfo.aper_base = kerninfo.aper_base; - userinfo.aper_size = kerninfo.aper_size; - userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; - userinfo.pg_used = kerninfo.current_memory; - - if (copy_to_user((void *) arg, &userinfo, sizeof(agp_info))) { - return -EFAULT; - } - return 0; -} - -static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_controller *controller; - if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) { - return -EPERM; - } - if (agp_fe.current_controller != NULL) { - return -EBUSY; - } - if ((agp_backend_acquire()) == 0) { - agp_fe.backend_acquired = TRUE; - } else { - return -EBUSY; - } - - controller = agp_find_controller_by_pid(priv->my_pid); - - if (controller != NULL) { - agp_controller_make_current(controller); - } else { - controller = agp_create_controller(priv->my_pid); - - 
if (controller == NULL) { - agp_fe.backend_acquired = FALSE; - agp_backend_release(); - return -ENOMEM; - } - agp_insert_controller(controller); - agp_controller_make_current(controller); - } - - set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); - set_bit(AGP_FF_IS_VALID, &priv->access_flags); - return 0; -} - -static int agpioc_release_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_controller_release_current(agp_fe.current_controller, priv); - return 0; -} - -static int agpioc_setup_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_setup mode; - - if (copy_from_user(&mode, (void *) arg, sizeof(agp_setup))) { - return -EFAULT; - } - agp_enable(mode.agp_mode); - return 0; -} - -static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_region reserve; - agp_client *client; - agp_file_private *client_priv; - - - if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) { - return -EFAULT; - } - if ((unsigned) reserve.seg_count >= ~0U/sizeof(agp_segment)) - return -EFAULT; - - client = agp_find_client_by_pid(reserve.pid); - - if (reserve.seg_count == 0) { - /* remove a client */ - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, - &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, - &client_priv->access_flags); - } - if (client == NULL) { - /* client is already removed */ - return 0; - } - return agp_remove_client(reserve.pid); - } else { - agp_segment *segment; - - if (reserve.seg_count >= 16384) - return -EINVAL; - - segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), - GFP_KERNEL); - - if (segment == NULL) { - return -ENOMEM; - } - if (copy_from_user(segment, (void *) reserve.seg_list, - sizeof(agp_segment) * reserve.seg_count)) { - kfree(segment); - return -EFAULT; - } - reserve.seg_list = segment; - - if (client == NULL) { - /* Create the client and add the segment */ - client = agp_create_client(reserve.pid); - - if (client == NULL) { - 
kfree(segment); - return -ENOMEM; - } - client_priv = agp_find_private(reserve.pid); - - if (client_priv != NULL) { - set_bit(AGP_FF_IS_CLIENT, - &client_priv->access_flags); - set_bit(AGP_FF_IS_VALID, - &client_priv->access_flags); - } - return agp_create_segment(client, &reserve); - } else { - return agp_create_segment(client, &reserve); - } - } - /* Will never really happen */ - return -EINVAL; -} - -static int agpioc_protect_wrap(agp_file_private * priv, unsigned long arg) -{ - /* This function is not currently implemented */ - return -EINVAL; -} - -static int agpioc_allocate_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_memory *memory; - agp_allocate alloc; - - if (copy_from_user(&alloc, (void *) arg, sizeof(agp_allocate))) { - return -EFAULT; - } - memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); - - if (memory == NULL) { - return -ENOMEM; - } - alloc.key = memory->key; - alloc.physical = memory->physical; - - if (copy_to_user((void *) arg, &alloc, sizeof(agp_allocate))) { - agp_free_memory_wrap(memory); - return -EFAULT; - } - return 0; -} - -static int agpioc_deallocate_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_memory *memory; - - memory = agp_find_mem_by_key((int) arg); - - if (memory == NULL) { - return -EINVAL; - } - agp_free_memory_wrap(memory); - return 0; -} - -static int agpioc_bind_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_bind bind_info; - agp_memory *memory; - - if (copy_from_user(&bind_info, (void *) arg, sizeof(agp_bind))) { - return -EFAULT; - } - memory = agp_find_mem_by_key(bind_info.key); - - if (memory == NULL) { - return -EINVAL; - } - return agp_bind_memory(memory, bind_info.pg_start); -} - -static int agpioc_unbind_wrap(agp_file_private * priv, unsigned long arg) -{ - agp_memory *memory; - agp_unbind unbind; - - if (copy_from_user(&unbind, (void *) arg, sizeof(agp_unbind))) { - return -EFAULT; - } - memory = agp_find_mem_by_key(unbind.key); - - if (memory == NULL) { - return 
-EINVAL; - } - return agp_unbind_memory(memory); -} - -static int agp_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - agp_file_private *curr_priv = (agp_file_private *) file->private_data; - int ret_val = -ENOTTY; - - AGP_LOCK(); - - if ((agp_fe.current_controller == NULL) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EINVAL; - goto ioctl_out; - } - if ((agp_fe.backend_acquired != TRUE) && - (cmd != AGPIOC_ACQUIRE)) { - ret_val = -EBUSY; - goto ioctl_out; - } - if (cmd != AGPIOC_ACQUIRE) { - if (!(test_bit(AGP_FF_IS_CONTROLLER, - &curr_priv->access_flags))) { - ret_val = -EPERM; - goto ioctl_out; - } - /* Use the original pid of the controller, - * in case it's threaded */ - - if (agp_fe.current_controller->pid != curr_priv->my_pid) { - ret_val = -EBUSY; - goto ioctl_out; - } - } - switch (cmd) { - case AGPIOC_INFO: - { - ret_val = agpioc_info_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_ACQUIRE: - { - ret_val = agpioc_acquire_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_RELEASE: - { - ret_val = agpioc_release_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_SETUP: - { - ret_val = agpioc_setup_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_RESERVE: - { - ret_val = agpioc_reserve_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_PROTECT: - { - ret_val = agpioc_protect_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_ALLOCATE: - { - ret_val = agpioc_allocate_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_DEALLOCATE: - { - ret_val = agpioc_deallocate_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_BIND: - { - ret_val = agpioc_bind_wrap(curr_priv, arg); - goto ioctl_out; - } - case AGPIOC_UNBIND: - { - ret_val = agpioc_unbind_wrap(curr_priv, arg); - goto ioctl_out; - } - } - -ioctl_out: - AGP_UNLOCK(); - return ret_val; -} - -static struct file_operations agp_fops = -{ - owner: THIS_MODULE, - llseek: no_llseek, - read: agp_read, - write: 
agp_write, - ioctl: agp_ioctl, - mmap: agp_mmap, - open: agp_open, - release: agp_release, -}; - -static struct miscdevice agp_miscdev = -{ - AGPGART_MINOR, - AGPGART_MODULE_NAME, - &agp_fops -}; - -int __init agp_frontend_initialize(void) -{ - memset(&agp_fe, 0, sizeof(struct agp_front_data)); - AGP_LOCK_INIT(); - - if (misc_register(&agp_miscdev)) { - printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); - return -EIO; - } - return 0; -} - -void __exit agp_frontend_cleanup(void) -{ - misc_deregister(&agp_miscdev); -} - diff -Nru a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/ali-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,265 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ +#include +#include +#include +#include +#include +#include +#include "agp.h" + +static int ali_fetch_size(void) +{ + int i; + u32 temp; + struct aper_size_info_32 *values; + + pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + temp &= ~(0xfffffff0); + values = A_SIZE_32(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static void ali_tlbflush(agp_memory * mem) +{ + u32 temp; + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); +// clear tag + pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, + ((temp & 0xfffffff0) | 0x00000001|0x00000002)); +} + +static void ali_cleanup(void) +{ + struct aper_size_info_32 *previous_size; + u32 temp; + + previous_size = A_SIZE_32(agp_bridge.previous_size); + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); +// clear tag + pci_write_config_dword(agp_bridge.dev, ALI_TAGCTRL, + ((temp & 0xffffff00) | 0x00000001|0x00000002)); + + pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, + ((temp & 0x00000ff0) | previous_size->size_value)); +} + +static int ali_configure(void) +{ + u32 temp; + struct aper_size_info_32 *current_size; + + current_size = A_SIZE_32(agp_bridge.current_size); + + /* aperture size and gatt addr */ + pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp); + temp = (((temp & 0x00000ff0) | (agp_bridge.gatt_bus_addr & 0xfffff000)) + | (current_size->size_value & 0xf)); + pci_write_config_dword(agp_bridge.dev, ALI_ATTBASE, temp); + + /* tlb control */ + + /* + * Question: Jeff, ALi's patch deletes this: + * + * pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + * 
pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, + * ((temp & 0xffffff00) | 0x00000010)); + * + * and replaces it with the following, which seems to duplicate the + * next couple of lines below it. I suspect this was an oversight, + * but you might want to check up on this? + */ + + pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, ALI_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + +#if 0 + if (agp_bridge.type == ALI_M1541) { + u32 nlvm_addr = 0; + + switch (current_size->size_value) { + case 0: break; + case 1: nlvm_addr = 0x100000;break; + case 2: nlvm_addr = 0x200000;break; + case 3: nlvm_addr = 0x400000;break; + case 4: nlvm_addr = 0x800000;break; + case 6: nlvm_addr = 0x1000000;break; + case 7: nlvm_addr = 0x2000000;break; + case 8: nlvm_addr = 0x4000000;break; + case 9: nlvm_addr = 0x8000000;break; + case 10: nlvm_addr = 0x10000000;break; + default: break; + } + nlvm_addr--; + nlvm_addr&=0xfff00000; + + nlvm_addr+= agp_bridge.gart_bus_addr; + nlvm_addr|=(agp_bridge.gart_bus_addr>>12); + printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); + } +#endif + + pci_read_config_dword(agp_bridge.dev, ALI_TLBCTRL, &temp); + temp &= 0xffffff7f; //enable TLB + pci_write_config_dword(agp_bridge.dev, ALI_TLBCTRL, temp); + + return 0; +} + +static unsigned long ali_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static void ali_cache_flush(void) +{ + global_cache_flush(); + + if (agp_bridge.type == ALI_M1541) { + int i, page_count; + u32 temp; + + page_count = 1 << A_SIZE_32(agp_bridge.current_size)->page_order; + for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) { + pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + (((temp & 
ALI_CACHE_FLUSH_ADDR_MASK) | + (agp_bridge.gatt_bus_addr + i)) | + ALI_CACHE_FLUSH_EN)); + } + } +} + +static void *ali_alloc_page(void) +{ + void *adr = agp_generic_alloc_page(); + u32 temp; + + if (adr == 0) + return 0; + + if (agp_bridge.type == ALI_M1541) { + pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | + virt_to_phys(adr)) | + ALI_CACHE_FLUSH_EN )); + } + return adr; +} + +static void ali_destroy_page(void * addr) +{ + u32 temp; + + if (addr == NULL) + return; + + global_cache_flush(); + + if (agp_bridge.type == ALI_M1541) { + pci_read_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, &temp); + pci_write_config_dword(agp_bridge.dev, ALI_CACHE_FLUSH_CTRL, + (((temp & ALI_CACHE_FLUSH_ADDR_MASK) | + virt_to_phys(addr)) | + ALI_CACHE_FLUSH_EN)); + } + + agp_generic_destroy_page(addr); +} + +/* Setup function */ +static struct gatt_mask ali_generic_masks[] = +{ + {mask: 0x00000000, type: 0} +}; + +static struct aper_size_info_32 ali_generic_sizes[7] = +{ + {256, 65536, 6, 10}, + {128, 32768, 5, 9}, + {64, 16384, 4, 8}, + {32, 8192, 3, 7}, + {16, 4096, 2, 6}, + {8, 2048, 1, 4}, + {4, 1024, 0, 3} +}; + +int __init ali_generic_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = ali_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) ali_generic_sizes; + agp_bridge.size_type = U32_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = ali_configure; + agp_bridge.fetch_size = ali_fetch_size; + agp_bridge.cleanup = ali_cleanup; + agp_bridge.tlb_flush = ali_tlbflush; + agp_bridge.mask_memory = ali_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = ali_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = 
agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = ali_alloc_page; + agp_bridge.agp_destroy_page = ali_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + diff -Nru a/drivers/char/agp/amd-agp.c b/drivers/char/agp/amd-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/amd-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,408 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ + +#include +#include +#include +#include +#include "agp.h" + +struct amd_page_map { + unsigned long *real; + unsigned long *remapped; +}; + +static struct _amd_irongate_private { + volatile u8 *registers; + struct amd_page_map **gatt_pages; + int num_tables; +} amd_irongate_private; + +static int amd_create_page_map(struct amd_page_map *page_map) +{ + int i; + + page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); + if (page_map->real == NULL) { + return -ENOMEM; + } + SetPageReserved(virt_to_page(page_map->real)); + CACHE_FLUSH(); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), + PAGE_SIZE); + if (page_map->remapped == NULL) { + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); + page_map->real = NULL; + return -ENOMEM; + } + CACHE_FLUSH(); + + for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { + page_map->remapped[i] = agp_bridge.scratch_page; + } + + return 0; +} + +static void amd_free_page_map(struct amd_page_map *page_map) +{ + iounmap(page_map->remapped); + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); +} + +static void amd_free_gatt_pages(void) +{ + int i; + struct amd_page_map **tables; + struct amd_page_map *entry; + + tables = amd_irongate_private.gatt_pages; + for(i = 0; i < amd_irongate_private.num_tables; i++) { + entry = tables[i]; + if (entry != NULL) { + if (entry->real != NULL) { + amd_free_page_map(entry); + } + kfree(entry); + } + } + kfree(tables); +} + +static int amd_create_gatt_pages(int nr_tables) +{ + struct amd_page_map **tables; + struct amd_page_map *entry; + int retval = 0; + int i; + + tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *), + GFP_KERNEL); + if (tables == NULL) { + return -ENOMEM; + } + memset(tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1)); + for (i = 0; i < nr_tables; 
i++) { + entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL); + if (entry == NULL) { + retval = -ENOMEM; + break; + } + memset(entry, 0, sizeof(struct amd_page_map)); + tables[i] = entry; + retval = amd_create_page_map(entry); + if (retval != 0) break; + } + amd_irongate_private.num_tables = nr_tables; + amd_irongate_private.gatt_pages = tables; + + if (retval != 0) amd_free_gatt_pages(); + + return retval; +} + +/* Since we don't need contiguous memory we just try + * to get the gatt table once + */ + +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +static int amd_create_gatt_table(void) +{ + struct aper_size_info_lvl2 *value; + struct amd_page_map page_dir; + unsigned long addr; + int retval; + u32 temp; + int i; + + value = A_SIZE_LVL2(agp_bridge.current_size); + retval = amd_create_page_map(&page_dir); + if (retval != 0) { + return retval; + } + + retval = amd_create_gatt_pages(value->num_entries / 1024); + if (retval != 0) { + amd_free_page_map(&page_dir); + return retval; + } + + agp_bridge.gatt_table_real = page_dir.real; + agp_bridge.gatt_table = page_dir.remapped; + agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); + + /* Get the address for the gart region. 
+ * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + + pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); + addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + agp_bridge.gart_bus_addr = addr; + + /* Calculate the agp offset */ + for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) { + page_dir.remapped[GET_PAGE_DIR_OFF(addr)] = + virt_to_phys(amd_irongate_private.gatt_pages[i]->real); + page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001; + } + + return 0; +} + +static int amd_free_gatt_table(void) +{ + struct amd_page_map page_dir; + + page_dir.real = agp_bridge.gatt_table_real; + page_dir.remapped = agp_bridge.gatt_table; + + amd_free_gatt_pages(); + amd_free_page_map(&page_dir); + return 0; +} + +static int amd_irongate_fetch_size(void) +{ + int i; + u32 temp; + struct aper_size_info_lvl2 *values; + + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (temp & 0x0000000e); + values = A_SIZE_LVL2(agp_bridge.aperture_sizes); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int amd_irongate_configure(void) +{ + struct aper_size_info_lvl2 *current_size; + u32 temp; + u16 enable_reg; + + current_size = A_SIZE_LVL2(agp_bridge.current_size); + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge.dev, AMD_MMBASE, &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + amd_irongate_private.registers = (volatile u8 *) ioremap(temp, 4096); + + /* Write out the address of the gatt table */ + OUTREG32(amd_irongate_private.registers, AMD_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* Write the Sync register */ + pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL, 0x80); + + /* Set indexing mode */ + pci_write_config_byte(agp_bridge.dev, AMD_MODECNTL2, 
0x00); + + /* Write the enable register */ + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg | 0x0004); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write out the size register */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = (((temp & ~(0x0000000e)) | current_size->size_value) + | 0x00000001); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + + /* Flush the tlb */ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); + + return 0; +} + +static void amd_irongate_cleanup(void) +{ + struct aper_size_info_lvl2 *previous_size; + u32 temp; + u16 enable_reg; + + previous_size = A_SIZE_LVL2(agp_bridge.previous_size); + + enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE); + enable_reg = (enable_reg & ~(0x0004)); + OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg); + + /* Write back the previous size and disable gart translation */ + pci_read_config_dword(agp_bridge.dev, AMD_APSIZE, &temp); + temp = ((temp & ~(0x0000000f)) | previous_size->size_value); + pci_write_config_dword(agp_bridge.dev, AMD_APSIZE, temp); + iounmap((void *) amd_irongate_private.registers); +} + +/* + * This routine could be implemented by taking the addresses + * written to the GATT, and flushing them individually. However + * currently it just flushes the whole table. Which is probably + * more efficient, since agp_memory blocks can be a large number of + * entries. 
+ */ + +static void amd_irongate_tlbflush(agp_memory * temp) +{ + OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001); +} + +static unsigned long amd_irongate_mask_memory(unsigned long addr, int type) +{ + /* Only type 0 is supported by the irongate */ + + return addr | agp_bridge.masks[0].mask; +} + +static int amd_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long *cur_gatt; + unsigned long addr; + + num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + } + agp_bridge.tlb_flush(mem); + return 0; +} + +static int amd_remove_memory(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + unsigned long *cur_gatt; + unsigned long addr; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static struct aper_size_info_lvl2 amd_irongate_sizes[7] = +{ + {2048, 524288, 0x0000000c}, + {1024, 262144, 0x0000000a}, + {512, 131072, 0x00000008}, + {256, 65536, 0x00000006}, + {128, 32768, 0x00000004}, + {64, 16384, 0x00000002}, + {32, 8192, 0x00000000} +}; + +static struct gatt_mask 
amd_irongate_masks[] = +{ + {mask: 0x00000001, type: 0} +}; + +int __init amd_irongate_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = amd_irongate_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; + agp_bridge.size_type = LVL2_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = (void *) &amd_irongate_private; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = amd_irongate_configure; + agp_bridge.fetch_size = amd_irongate_fetch_size; + agp_bridge.cleanup = amd_irongate_cleanup; + agp_bridge.tlb_flush = amd_irongate_tlbflush; + agp_bridge.mask_memory = amd_irongate_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = amd_create_gatt_table; + agp_bridge.free_gatt_table = amd_free_gatt_table; + agp_bridge.insert_memory = amd_insert_memory; + agp_bridge.remove_memory = amd_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + diff -Nru a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/frontend.c Sat Jul 20 12:12:34 2002 @@ -0,0 +1,1086 @@ +/* + * AGPGART module frontend version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "agp.h" + +static struct agp_front_data agp_fe; + +static agp_memory *agp_find_mem_by_key(int key) +{ + agp_memory *curr; + + if (agp_fe.current_controller == NULL) { + return NULL; + } + curr = agp_fe.current_controller->pool; + + while (curr != NULL) { + if (curr->key == key) { + return curr; + } + curr = curr->next; + } + + return NULL; +} + +static void agp_remove_from_pool(agp_memory * temp) +{ + agp_memory *prev; + agp_memory *next; + + /* Check to see if this is even in the memory pool */ + + if (agp_find_mem_by_key(temp->key) != NULL) { + next = temp->next; + prev = temp->prev; + + if (prev != NULL) { + prev->next = next; + if (next != NULL) { + next->prev = prev; + } + } else { + /* This is the first item on the list */ + if (next != NULL) { + next->prev = NULL; + } + agp_fe.current_controller->pool = next; + } + } +} + +/* + * Routines for managing each client's segment list - + * These routines handle adding and removing segments + * to each auth'ed client. 
+ */ + +static agp_segment_priv *agp_find_seg_in_client(const agp_client * client, + unsigned long offset, + int size, pgprot_t page_prot) +{ + agp_segment_priv *seg; + int num_segments, pg_start, pg_count, i; + + pg_start = offset / 4096; + pg_count = size / 4096; + seg = *(client->segments); + num_segments = client->num_segments; + + for (i = 0; i < client->num_segments; i++) { + if ((seg[i].pg_start == pg_start) && + (seg[i].pg_count == pg_count) && + (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { + return seg + i; + } + } + + return NULL; +} + +static void agp_remove_seg_from_client(agp_client * client) +{ + if (client->segments != NULL) { + if (*(client->segments) != NULL) { + kfree(*(client->segments)); + } + kfree(client->segments); + } +} + +static void agp_add_seg_to_client(agp_client * client, + agp_segment_priv ** seg, int num_segments) +{ + agp_segment_priv **prev_seg; + + prev_seg = client->segments; + + if (prev_seg != NULL) { + agp_remove_seg_from_client(client); + } + client->num_segments = num_segments; + client->segments = seg; +} + +/* Originally taken from linux/mm/mmap.c from the array + * protection_map. 
+ * The original really should be exported to modules, or + * some routine which does the conversion for you + */ + +static const pgprot_t my_protect_map[16] = +{ + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 +}; + +static pgprot_t agp_convert_mmap_flags(int prot) +{ +#define _trans(x,bit1,bit2) \ +((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0) + + unsigned long prot_bits; + pgprot_t temp; + + prot_bits = _trans(prot, PROT_READ, VM_READ) | + _trans(prot, PROT_WRITE, VM_WRITE) | + _trans(prot, PROT_EXEC, VM_EXEC); + + prot_bits |= VM_SHARED; + + temp = my_protect_map[prot_bits & 0x0000000f]; + + return temp; +} + +static int agp_create_segment(agp_client * client, agp_region * region) +{ + agp_segment_priv **ret_seg; + agp_segment_priv *seg; + agp_segment *user_seg; + int i; + + seg = kmalloc((sizeof(agp_segment_priv) * region->seg_count), + GFP_KERNEL); + if (seg == NULL) { + kfree(region->seg_list); + return -ENOMEM; + } + memset(seg, 0, (sizeof(agp_segment_priv) * region->seg_count)); + user_seg = region->seg_list; + + for (i = 0; i < region->seg_count; i++) { + seg[i].pg_start = user_seg[i].pg_start; + seg[i].pg_count = user_seg[i].pg_count; + seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); + } + ret_seg = kmalloc(sizeof(void *), GFP_KERNEL); + if (ret_seg == NULL) { + kfree(region->seg_list); + kfree(seg); + return -ENOMEM; + } + *ret_seg = seg; + kfree(region->seg_list); + agp_add_seg_to_client(client, ret_seg, region->seg_count); + return 0; +} + +/* End - Routines for managing each client's segment list */ + +/* This function must only be called when current_controller != NULL */ +static void agp_insert_into_pool(agp_memory * temp) +{ + agp_memory *prev; + + prev = agp_fe.current_controller->pool; + + if (prev != NULL) { + prev->prev = temp; + temp->next = prev; + } + agp_fe.current_controller->pool = temp; +} + + +/* File private list routines */ + +agp_file_private 
*agp_find_private(pid_t pid) +{ + agp_file_private *curr; + + curr = agp_fe.file_priv_list; + + while (curr != NULL) { + if (curr->my_pid == pid) { + return curr; + } + curr = curr->next; + } + + return NULL; +} + +void agp_insert_file_private(agp_file_private * priv) +{ + agp_file_private *prev; + + prev = agp_fe.file_priv_list; + + if (prev != NULL) { + prev->prev = priv; + } + priv->next = prev; + agp_fe.file_priv_list = priv; +} + +void agp_remove_file_private(agp_file_private * priv) +{ + agp_file_private *next; + agp_file_private *prev; + + next = priv->next; + prev = priv->prev; + + if (prev != NULL) { + prev->next = next; + + if (next != NULL) { + next->prev = prev; + } + } else { + if (next != NULL) { + next->prev = NULL; + } + agp_fe.file_priv_list = next; + } +} + +/* End - File flag list routines */ + +/* + * Wrappers for agp_free_memory & agp_allocate_memory + * These make sure that internal lists are kept updated. + */ +static void agp_free_memory_wrap(agp_memory * memory) +{ + agp_remove_from_pool(memory); + agp_free_memory(memory); +} + +static agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) +{ + agp_memory *memory; + + memory = agp_allocate_memory(pg_count, type); + printk(KERN_DEBUG "agp_allocate_memory: %p\n", memory); + if (memory == NULL) { + return NULL; + } + agp_insert_into_pool(memory); + return memory; +} + +/* Routines for managing the list of controllers - + * These routines manage the current controller, and the list of + * controllers + */ + +static agp_controller *agp_find_controller_by_pid(pid_t id) +{ + agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if (controller->pid == id) { + return controller; + } + controller = controller->next; + } + + return NULL; +} + +static agp_controller *agp_create_controller(pid_t id) +{ + agp_controller *controller; + + controller = kmalloc(sizeof(agp_controller), GFP_KERNEL); + + if (controller == NULL) { + return NULL; + } + 
memset(controller, 0, sizeof(agp_controller)); + controller->pid = id; + + return controller; +} + +static int agp_insert_controller(agp_controller * controller) +{ + agp_controller *prev_controller; + + prev_controller = agp_fe.controllers; + controller->next = prev_controller; + + if (prev_controller != NULL) { + prev_controller->prev = controller; + } + agp_fe.controllers = controller; + + return 0; +} + +static void agp_remove_all_clients(agp_controller * controller) +{ + agp_client *client; + agp_client *temp; + + client = controller->clients; + + while (client) { + agp_file_private *priv; + + temp = client; + agp_remove_seg_from_client(temp); + priv = agp_find_private(temp->pid); + + if (priv != NULL) { + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + } + client = client->next; + kfree(temp); + } +} + +static void agp_remove_all_memory(agp_controller * controller) +{ + agp_memory *memory; + agp_memory *temp; + + memory = controller->pool; + + while (memory) { + temp = memory; + memory = memory->next; + agp_free_memory_wrap(temp); + } +} + +static int agp_remove_controller(agp_controller * controller) +{ + agp_controller *prev_controller; + agp_controller *next_controller; + + prev_controller = controller->prev; + next_controller = controller->next; + + if (prev_controller != NULL) { + prev_controller->next = next_controller; + if (next_controller != NULL) { + next_controller->prev = prev_controller; + } + } else { + if (next_controller != NULL) { + next_controller->prev = NULL; + } + agp_fe.controllers = next_controller; + } + + agp_remove_all_memory(controller); + agp_remove_all_clients(controller); + + if (agp_fe.current_controller == controller) { + agp_fe.current_controller = NULL; + agp_fe.backend_acquired = FALSE; + agp_backend_release(); + } + kfree(controller); + return 0; +} + +static void agp_controller_make_current(agp_controller * controller) +{ + agp_client *clients; + + clients = 
controller->clients; + + while (clients != NULL) { + agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) { + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + } + clients = clients->next; + } + + agp_fe.current_controller = controller; +} + +static void agp_controller_release_current(agp_controller * controller, + agp_file_private * controller_priv) +{ + agp_client *clients; + + clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags); + clients = controller->clients; + + while (clients != NULL) { + agp_file_private *priv; + + priv = agp_find_private(clients->pid); + + if (priv != NULL) { + clear_bit(AGP_FF_IS_VALID, &priv->access_flags); + } + clients = clients->next; + } + + agp_fe.current_controller = NULL; + agp_fe.used_by_controller = FALSE; + agp_backend_release(); +} + +/* + * Routines for managing client lists - + * These routines are for managing the list of auth'ed clients. + */ + +static agp_client *agp_find_client_in_controller(agp_controller * controller, + pid_t id) +{ + agp_client *client; + + if (controller == NULL) { + return NULL; + } + client = controller->clients; + + while (client != NULL) { + if (client->pid == id) { + return client; + } + client = client->next; + } + + return NULL; +} + +static agp_controller *agp_find_controller_for_client(pid_t id) +{ + agp_controller *controller; + + controller = agp_fe.controllers; + + while (controller != NULL) { + if ((agp_find_client_in_controller(controller, id)) != NULL) { + return controller; + } + controller = controller->next; + } + + return NULL; +} + +static agp_client *agp_find_client_by_pid(pid_t id) +{ + agp_client *temp; + + if (agp_fe.current_controller == NULL) { + return NULL; + } + temp = agp_find_client_in_controller(agp_fe.current_controller, id); + return temp; +} + +static void agp_insert_client(agp_client * client) +{ + agp_client *prev_client; + + prev_client = 
agp_fe.current_controller->clients; + client->next = prev_client; + + if (prev_client != NULL) { + prev_client->prev = client; + } + agp_fe.current_controller->clients = client; + agp_fe.current_controller->num_clients++; +} + +static agp_client *agp_create_client(pid_t id) +{ + agp_client *new_client; + + new_client = kmalloc(sizeof(agp_client), GFP_KERNEL); + + if (new_client == NULL) { + return NULL; + } + memset(new_client, 0, sizeof(agp_client)); + new_client->pid = id; + agp_insert_client(new_client); + return new_client; +} + +static int agp_remove_client(pid_t id) +{ + agp_client *client; + agp_client *prev_client; + agp_client *next_client; + agp_controller *controller; + + controller = agp_find_controller_for_client(id); + + if (controller == NULL) { + return -EINVAL; + } + client = agp_find_client_in_controller(controller, id); + + if (client == NULL) { + return -EINVAL; + } + prev_client = client->prev; + next_client = client->next; + + if (prev_client != NULL) { + prev_client->next = next_client; + if (next_client != NULL) { + next_client->prev = prev_client; + } + } else { + if (next_client != NULL) { + next_client->prev = NULL; + } + controller->clients = next_client; + } + + controller->num_clients--; + agp_remove_seg_from_client(client); + kfree(client); + return 0; +} + +/* End - Routines for managing client lists */ + +/* File Operations */ + +static int agp_mmap(struct file *file, struct vm_area_struct *vma) +{ + int size; + int current_size; + unsigned long offset; + agp_client *client; + agp_file_private *priv = (agp_file_private *) file->private_data; + agp_kern_info kerninfo; + + AGP_LOCK(); + + if (agp_fe.backend_acquired != TRUE) { + AGP_UNLOCK(); + return -EPERM; + } + if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags))) { + AGP_UNLOCK(); + return -EPERM; + } + agp_copy_info(&kerninfo); + size = vma->vm_end - vma->vm_start; + current_size = kerninfo.aper_size; + current_size = current_size * 0x100000; + offset = vma->vm_pgoff << 
PAGE_SHIFT; + + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { + if ((size + offset) > current_size) { + AGP_UNLOCK(); + return -EINVAL; + } + client = agp_find_client_by_pid(current->pid); + + if (client == NULL) { + AGP_UNLOCK(); + return -EPERM; + } + if (!agp_find_seg_in_client(client, offset, + size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EINVAL; + } + if (remap_page_range(vma, vma->vm_start, + (kerninfo.aper_base + offset), + size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EAGAIN; + } + AGP_UNLOCK(); + return 0; + } + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { + if (size != current_size) { + AGP_UNLOCK(); + return -EINVAL; + } + if (remap_page_range(vma, vma->vm_start, kerninfo.aper_base, + size, vma->vm_page_prot)) { + AGP_UNLOCK(); + return -EAGAIN; + } + AGP_UNLOCK(); + return 0; + } + AGP_UNLOCK(); + return -EPERM; +} + +static int agp_release(struct inode *inode, struct file *file) +{ + agp_file_private *priv = (agp_file_private *) file->private_data; + + AGP_LOCK(); + + if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) { + agp_controller *controller; + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + if (controller == agp_fe.current_controller) { + agp_controller_release_current(controller, + priv); + } + agp_remove_controller(controller); + } + } + if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) { + agp_remove_client(priv->my_pid); + } + agp_remove_file_private(priv); + kfree(priv); + AGP_UNLOCK(); + return 0; +} + +static int agp_open(struct inode *inode, struct file *file) +{ + int minor = minor(inode->i_rdev); + agp_file_private *priv; + agp_client *client; + int rc = -ENXIO; + + AGP_LOCK(); + + if (minor != AGPGART_MINOR) + goto err_out; + + priv = kmalloc(sizeof(agp_file_private), GFP_KERNEL); + if (priv == NULL) + goto err_out_nomem; + + memset(priv, 0, sizeof(agp_file_private)); + set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags); + priv->my_pid = 
current->pid; + + if ((current->uid == 0) || (current->suid == 0)) { + /* Root priv, can be controller */ + set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags); + } + client = agp_find_client_by_pid(current->pid); + + if (client != NULL) { + set_bit(AGP_FF_IS_CLIENT, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + } + file->private_data = (void *) priv; + agp_insert_file_private(priv); + AGP_UNLOCK(); + return 0; + +err_out_nomem: + rc = -ENOMEM; +err_out: + AGP_UNLOCK(); + return rc; +} + + +static ssize_t agp_read(struct file *file, char *buf, + size_t count, loff_t * ppos) +{ + return -EINVAL; +} + +static ssize_t agp_write(struct file *file, const char *buf, + size_t count, loff_t * ppos) +{ + return -EINVAL; +} + +static int agpioc_info_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_info userinfo; + agp_kern_info kerninfo; + + agp_copy_info(&kerninfo); + + userinfo.version.major = kerninfo.version.major; + userinfo.version.minor = kerninfo.version.minor; + userinfo.bridge_id = kerninfo.device->vendor | + (kerninfo.device->device << 16); + userinfo.agp_mode = kerninfo.mode; + userinfo.aper_base = kerninfo.aper_base; + userinfo.aper_size = kerninfo.aper_size; + userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; + userinfo.pg_used = kerninfo.current_memory; + + if (copy_to_user((void *) arg, &userinfo, sizeof(agp_info))) { + return -EFAULT; + } + return 0; +} + +static int agpioc_acquire_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_controller *controller; + if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags))) { + return -EPERM; + } + if (agp_fe.current_controller != NULL) { + return -EBUSY; + } + if ((agp_backend_acquire()) == 0) { + agp_fe.backend_acquired = TRUE; + } else { + return -EBUSY; + } + + controller = agp_find_controller_by_pid(priv->my_pid); + + if (controller != NULL) { + agp_controller_make_current(controller); + } else { + controller = agp_create_controller(priv->my_pid); + + 
if (controller == NULL) { + agp_fe.backend_acquired = FALSE; + agp_backend_release(); + return -ENOMEM; + } + agp_insert_controller(controller); + agp_controller_make_current(controller); + } + + set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags); + set_bit(AGP_FF_IS_VALID, &priv->access_flags); + return 0; +} + +static int agpioc_release_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_controller_release_current(agp_fe.current_controller, priv); + return 0; +} + +static int agpioc_setup_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_setup mode; + + if (copy_from_user(&mode, (void *) arg, sizeof(agp_setup))) { + return -EFAULT; + } + agp_enable(mode.agp_mode); + return 0; +} + +static int agpioc_reserve_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_region reserve; + agp_client *client; + agp_file_private *client_priv; + + + if (copy_from_user(&reserve, (void *) arg, sizeof(agp_region))) { + return -EFAULT; + } + if ((unsigned) reserve.seg_count >= ~0U/sizeof(agp_segment)) + return -EFAULT; + + client = agp_find_client_by_pid(reserve.pid); + + if (reserve.seg_count == 0) { + /* remove a client */ + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + &client_priv->access_flags); + } + if (client == NULL) { + /* client is already removed */ + return 0; + } + return agp_remove_client(reserve.pid); + } else { + agp_segment *segment; + + if (reserve.seg_count >= 16384) + return -EINVAL; + + segment = kmalloc((sizeof(agp_segment) * reserve.seg_count), + GFP_KERNEL); + + if (segment == NULL) { + return -ENOMEM; + } + if (copy_from_user(segment, (void *) reserve.seg_list, + sizeof(agp_segment) * reserve.seg_count)) { + kfree(segment); + return -EFAULT; + } + reserve.seg_list = segment; + + if (client == NULL) { + /* Create the client and add the segment */ + client = agp_create_client(reserve.pid); + + if (client == NULL) { + 
kfree(segment); + return -ENOMEM; + } + client_priv = agp_find_private(reserve.pid); + + if (client_priv != NULL) { + set_bit(AGP_FF_IS_CLIENT, + &client_priv->access_flags); + set_bit(AGP_FF_IS_VALID, + &client_priv->access_flags); + } + return agp_create_segment(client, &reserve); + } else { + return agp_create_segment(client, &reserve); + } + } + /* Will never really happen */ + return -EINVAL; +} + +static int agpioc_protect_wrap(agp_file_private * priv, unsigned long arg) +{ + /* This function is not currently implemented */ + return -EINVAL; +} + +static int agpioc_allocate_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + agp_allocate alloc; + + if (copy_from_user(&alloc, (void *) arg, sizeof(agp_allocate))) { + return -EFAULT; + } + memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); + + if (memory == NULL) { + return -ENOMEM; + } + alloc.key = memory->key; + alloc.physical = memory->physical; + + if (copy_to_user((void *) arg, &alloc, sizeof(agp_allocate))) { + agp_free_memory_wrap(memory); + return -EFAULT; + } + return 0; +} + +static int agpioc_deallocate_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + + memory = agp_find_mem_by_key((int) arg); + + if (memory == NULL) { + return -EINVAL; + } + agp_free_memory_wrap(memory); + return 0; +} + +static int agpioc_bind_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_bind bind_info; + agp_memory *memory; + + if (copy_from_user(&bind_info, (void *) arg, sizeof(agp_bind))) { + return -EFAULT; + } + memory = agp_find_mem_by_key(bind_info.key); + + if (memory == NULL) { + return -EINVAL; + } + return agp_bind_memory(memory, bind_info.pg_start); +} + +static int agpioc_unbind_wrap(agp_file_private * priv, unsigned long arg) +{ + agp_memory *memory; + agp_unbind unbind; + + if (copy_from_user(&unbind, (void *) arg, sizeof(agp_unbind))) { + return -EFAULT; + } + memory = agp_find_mem_by_key(unbind.key); + + if (memory == NULL) { + return 
-EINVAL; + } + return agp_unbind_memory(memory); +} + +static int agp_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + agp_file_private *curr_priv = (agp_file_private *) file->private_data; + int ret_val = -ENOTTY; + + AGP_LOCK(); + + if ((agp_fe.current_controller == NULL) && + (cmd != AGPIOC_ACQUIRE)) { + ret_val = -EINVAL; + goto ioctl_out; + } + if ((agp_fe.backend_acquired != TRUE) && + (cmd != AGPIOC_ACQUIRE)) { + ret_val = -EBUSY; + goto ioctl_out; + } + if (cmd != AGPIOC_ACQUIRE) { + if (!(test_bit(AGP_FF_IS_CONTROLLER, + &curr_priv->access_flags))) { + ret_val = -EPERM; + goto ioctl_out; + } + /* Use the original pid of the controller, + * in case it's threaded */ + + if (agp_fe.current_controller->pid != curr_priv->my_pid) { + ret_val = -EBUSY; + goto ioctl_out; + } + } + switch (cmd) { + case AGPIOC_INFO: + { + ret_val = agpioc_info_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_ACQUIRE: + { + ret_val = agpioc_acquire_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_RELEASE: + { + ret_val = agpioc_release_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_SETUP: + { + ret_val = agpioc_setup_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_RESERVE: + { + ret_val = agpioc_reserve_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_PROTECT: + { + ret_val = agpioc_protect_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_ALLOCATE: + { + ret_val = agpioc_allocate_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_DEALLOCATE: + { + ret_val = agpioc_deallocate_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_BIND: + { + ret_val = agpioc_bind_wrap(curr_priv, arg); + goto ioctl_out; + } + case AGPIOC_UNBIND: + { + ret_val = agpioc_unbind_wrap(curr_priv, arg); + goto ioctl_out; + } + } + +ioctl_out: + AGP_UNLOCK(); + return ret_val; +} + +static struct file_operations agp_fops = +{ + owner: THIS_MODULE, + llseek: no_llseek, + read: agp_read, + write: 
agp_write, + ioctl: agp_ioctl, + mmap: agp_mmap, + open: agp_open, + release: agp_release, +}; + +static struct miscdevice agp_miscdev = +{ + AGPGART_MINOR, + AGPGART_MODULE_NAME, + &agp_fops +}; + +int __init agp_frontend_initialize(void) +{ + memset(&agp_fe, 0, sizeof(struct agp_front_data)); + AGP_LOCK_INIT(); + + if (misc_register(&agp_miscdev)) { + printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR); + return -EIO; + } + return 0; +} + +void __exit agp_frontend_cleanup(void) +{ + misc_deregister(&agp_miscdev); +} + diff -Nru a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/hp-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,394 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/agp_backend.h> +#include "agp.h" + + +#ifndef log2 +#define log2(x) ffz(~(x)) +#endif + +#define HP_ZX1_IOVA_BASE GB(1UL) +#define HP_ZX1_IOVA_SIZE GB(1UL) +#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) +#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL + +#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL +#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \ + hp_private.io_tlb_shift) + +static struct aper_size_info_fixed hp_zx1_sizes[] = +{ + {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ +}; + +static struct gatt_mask hp_zx1_masks[] = +{ + {mask: HP_ZX1_PDIR_VALID_BIT, type: 0} +}; + +static struct _hp_private { + struct pci_dev *ioc; + volatile u8 *registers; + u64 *io_pdir; // PDIR for entire IOVA + u64 *gatt; // PDIR just for GART (subset of above) + u64 gatt_entries; + u64 iova_base; + u64 gart_base; + u64 gart_size; + u64 io_pdir_size; + int io_pdir_owner; // do we own it, or share it with sba_iommu? + int io_page_size; + int io_tlb_shift; + int io_tlb_ps; // IOC ps config + int io_pages_per_kpage; +} hp_private; + +static int __init hp_zx1_ioc_shared(void) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n"); + + /* + * IOC already configured by sba_iommu module; just use + * its setup. 
We assume: + * - IOVA space is 1Gb in size + * - first 512Mb is IOMMU, second 512Mb is GART + */ + hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG); + switch (hp->io_tlb_ps) { + case 0: hp->io_tlb_shift = 12; break; + case 1: hp->io_tlb_shift = 13; break; + case 2: hp->io_tlb_shift = 14; break; + case 3: hp->io_tlb_shift = 16; break; + default: + printk(KERN_ERR PFX "Invalid IOTLB page size " + "configuration 0x%x\n", hp->io_tlb_ps); + hp->gatt = 0; + hp->gatt_entries = 0; + return -ENODEV; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; + + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gatt_entries = hp->gart_size / hp->io_page_size; + + hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE)); + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + + if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { + hp->gatt = 0; + hp->gatt_entries = 0; + printk(KERN_ERR PFX "No reserved IO PDIR entry found; " + "GART disabled\n"); + return -ENODEV; + } + + return 0; +} + +static int __init hp_zx1_ioc_owner(u8 ioc_rev) +{ + struct _hp_private *hp = &hp_private; + + printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n"); + + /* + * Select an IOV page size no larger than system page size. 
+ */ + if (PAGE_SIZE >= KB(64)) { + hp->io_tlb_shift = 16; + hp->io_tlb_ps = 3; + } else if (PAGE_SIZE >= KB(16)) { + hp->io_tlb_shift = 14; + hp->io_tlb_ps = 2; + } else if (PAGE_SIZE >= KB(8)) { + hp->io_tlb_shift = 13; + hp->io_tlb_ps = 1; + } else { + hp->io_tlb_shift = 12; + hp->io_tlb_ps = 0; + } + hp->io_page_size = 1 << hp->io_tlb_shift; + hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; + + hp->iova_base = HP_ZX1_IOVA_BASE; + hp->gart_size = HP_ZX1_GART_SIZE; + hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size; + + hp->gatt_entries = hp->gart_size / hp->io_page_size; + hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64); + + return 0; +} + +static int __init hp_zx1_ioc_init(void) +{ + struct _hp_private *hp = &hp_private; + struct pci_dev *ioc; + int i; + u8 ioc_rev; + + ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); + if (!ioc) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n"); + return -ENODEV; + } + hp->ioc = ioc; + + pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { + hp->registers = (u8 *) ioremap(pci_resource_start(ioc, + i), + pci_resource_len(ioc, i)); + break; + } + } + if (!hp->registers) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n"); + + return -ENODEV; + } + + /* + * If the IOTLB is currently disabled, we can take it over. + * Otherwise, we have to share with sba_iommu. 
+ */ + hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; + + if (hp->io_pdir_owner) + return hp_zx1_ioc_owner(ioc_rev); + + return hp_zx1_ioc_shared(); +} + +static int hp_zx1_fetch_size(void) +{ + int size; + + size = hp_private.gart_size / MB(1); + hp_zx1_sizes[0].size = size; + agp_bridge.current_size = (void *) &hp_zx1_sizes[0]; + return size; +} + +static int hp_zx1_configure(void) +{ + struct _hp_private *hp = &hp_private; + + agp_bridge.gart_bus_addr = hp->gart_base; + agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP); + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); + + if (hp->io_pdir_owner) { + OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, + virt_to_phys(hp->io_pdir)); + OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps); + OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); + OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1); + OUTREG64(hp->registers, HP_ZX1_PCOM, + hp->iova_base | log2(HP_ZX1_IOVA_SIZE)); + INREG64(hp->registers, HP_ZX1_PCOM); + } + + return 0; +} + +static void hp_zx1_cleanup(void) +{ + struct _hp_private *hp = &hp_private; + + if (hp->io_pdir_owner) + OUTREG64(hp->registers, HP_ZX1_IBASE, 0); + iounmap((void *) hp->registers); +} + +static void hp_zx1_tlbflush(agp_memory * mem) +{ + struct _hp_private *hp = &hp_private; + + OUTREG64(hp->registers, HP_ZX1_PCOM, + hp->gart_base | log2(hp->gart_size)); + INREG64(hp->registers, HP_ZX1_PCOM); +} + +static int hp_zx1_create_gatt_table(void) +{ + struct _hp_private *hp = &hp_private; + int i; + + if (hp->io_pdir_owner) { + hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL, + get_order(hp->io_pdir_size)); + if (!hp->io_pdir) { + printk(KERN_ERR PFX "Couldn't allocate contiguous " + "memory for I/O PDIR\n"); + hp->gatt = 0; + hp->gatt_entries = 0; + return -ENOMEM; + } + memset(hp->io_pdir, 0, hp->io_pdir_size); + + hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; + } + + 
for (i = 0; i < hp->gatt_entries; i++) { + hp->gatt[i] = (unsigned long) agp_bridge.scratch_page; + } + + return 0; +} + +static int hp_zx1_free_gatt_table(void) +{ + struct _hp_private *hp = &hp_private; + + if (hp->io_pdir_owner) + free_pages((unsigned long) hp->io_pdir, + get_order(hp->io_pdir_size)); + else + hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE; + return 0; +} + +static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, k; + off_t j, io_pg_start; + int io_pg_count; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + if ((io_pg_start + io_pg_count) > hp->gatt_entries) { + return -EINVAL; + } + + j = io_pg_start; + while (j < (io_pg_start + io_pg_count)) { + if (hp->gatt[j]) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = io_pg_start; i < mem->page_count; i++) { + unsigned long paddr; + + paddr = mem->memory[i]; + for (k = 0; + k < hp->io_pages_per_kpage; + k++, j++, paddr += hp->io_page_size) { + hp->gatt[j] = agp_bridge.mask_memory(paddr, type); + } + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + struct _hp_private *hp = &hp_private; + int i, io_pg_start, io_pg_count; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + io_pg_start = hp->io_pages_per_kpage * pg_start; + io_pg_count = hp->io_pages_per_kpage * mem->page_count; + for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) { + hp->gatt[i] = agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static unsigned long hp_zx1_mask_memory(unsigned long addr, int type) +{ + return HP_ZX1_PDIR_VALID_BIT | addr; +} + +static unsigned long hp_zx1_unmask_memory(unsigned long addr) +{ + return addr & 
~(HP_ZX1_PDIR_VALID_BIT); +} + +int __init hp_zx1_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = hp_zx1_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.dev_private_data = NULL; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = hp_zx1_configure; + agp_bridge.fetch_size = hp_zx1_fetch_size; + agp_bridge.cleanup = hp_zx1_cleanup; + agp_bridge.tlb_flush = hp_zx1_tlbflush; + agp_bridge.mask_memory = hp_zx1_mask_memory; + agp_bridge.unmask_memory = hp_zx1_unmask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; + agp_bridge.free_gatt_table = hp_zx1_free_gatt_table; + agp_bridge.insert_memory = hp_zx1_insert_memory; + agp_bridge.remove_memory = hp_zx1_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.cant_use_aperture = 1; + + return hp_zx1_ioc_init(); + + (void) pdev; /* unused */ +} + diff -Nru a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/i460-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,595 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/agp_backend.h> +#include "agp.h" + +/* BIOS configures the chipset so that one of two apbase registers are used */ +static u8 intel_i460_dynamic_apbase = 0x10; + +/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ +static u8 intel_i460_pageshift = 12; +static u32 intel_i460_pagesize; + +/* Keep track of which is larger, chipset or kernel page size. 
*/ +static u32 intel_i460_cpk = 1; + +/* Structure for tracking partial use of 4MB GART pages */ +static u32 **i460_pg_detail = NULL; +static u32 *i460_pg_count = NULL; + +#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) +#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) + +#define I460_SRAM_IO_DISABLE (1 << 4) +#define I460_BAPBASE_ENABLE (1 << 3) +#define I460_AGPSIZ_MASK 0x7 +#define I460_4M_PS (1 << 1) + +#define log2(x) ffz(~(x)) + +static inline void intel_i460_read_back (volatile u32 *entry) +{ + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + *entry; +} + +static int intel_i460_fetch_size(void) +{ + int i; + u8 temp; + struct aper_size_info_8 *values; + + /* Determine the GART page size */ + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); + intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12; + intel_i460_pagesize = 1UL << intel_i460_pageshift; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + + /* Exit now if the IO drivers for the GART SRAMS are turned off */ + if (temp & I460_SRAM_IO_DISABLE) { + printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n"); + printk(KERN_ERR PFX "AGPGART operation not possible\n"); + return 0; + } + + /* Make sure we don't try to create an 2 ^ 23 entry GATT */ + if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); + return 0; + } + + /* Determine the proper APBASE register */ + if (temp & I460_BAPBASE_ENABLE) + intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; + else + intel_i460_dynamic_apbase = INTEL_I460_APBASE; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* + * Dynamically calculate the proper num_entries and page_order values for + * the define aperture sizes. 
Take care not to shift off the end of + * values[i].size. + */ + values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12); + values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge.previous_size = agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. */ +static void intel_i460_tlb_flush(agp_memory * mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void intel_i460_write_agpsiz(u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void intel_i460_cleanup(void) +{ + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + intel_i460_write_agpsiz(previous_size->size_value); + + if (intel_i460_cpk == 0) { + vfree(i460_pg_detail); + vfree(i460_pg_count); + } +} + + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +static int intel_i460_configure(void) +{ + union { + u32 small[2]; + u64 large; + } temp; + u8 scratch; + int i; + + struct aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge.current_size); + intel_i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems. 
+ */ + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than + * a kernel page. + */ + if (I460_CPAGES_PER_KPAGE >= 1) { + intel_i460_cpk = 1; + } else { + intel_i460_cpk = 0; + + i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries); + i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries); + + for (i = 0; i < current_size->num_entries; i++) { + i460_pg_count[i] = 0; + i460_pg_detail[i] = NULL; + } + } + return 0; +} + +static int intel_i460_create_gatt_table(void) +{ + char *table; + int i; + int page_order; + int num_entries; + void *temp; + + /* + * Load up the fixed address of the GART SRAMS which hold our + * GATT table. 
+ */ + table = (char *) __va(INTEL_I460_ATTBASE); + + temp = agp_bridge.current_size; + page_order = A_SIZE_8(temp)->page_order; + num_entries = A_SIZE_8(temp)->num_entries; + + agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +static int intel_i460_free_gatt_table(void) +{ + int num_entries; + int i; + void *temp; + + temp = agp_bridge.current_size; + + num_entries = A_SIZE_8(temp)->num_entries; + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + + iounmap(agp_bridge.gatt_table); + return 0; +} + +/* These functions are called when PAGE_SIZE exceeds the GART page size */ + +static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i, j, k, num_entries; + void *temp; + unsigned long paddr; + + /* + * The rest of the kernel will compute page offsets in terms of + * PAGE_SIZE. + */ + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... 
*/ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (i = 0, j = pg_start; i < mem->page_count; i++) { + paddr = mem->memory[i]; + for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) + agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type); + } + + intel_i460_read_back(agp_bridge.gatt_table + j - 1); + return 0; +} + +static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type) +{ + int i; + + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++) + agp_bridge.gatt_table[i] = 0; + + intel_i460_read_back(agp_bridge.gatt_table + i - 1); + return 0; +} + +/* + * These functions are called when the GART page size exceeds PAGE_SIZE. + * + * This situation is interesting since AGP memory allocations that are + * smaller than a single GART page are possible. The structures i460_pg_count + * and i460_pg_detail track partial allocation of the large GART pages to + * work around this issue. + * + * i460_pg_count[pg_num] tracks the number of kernel pages in use within + * GART page pg_num. i460_pg_detail[pg_num] is an array containing a + * pseudo-GART entry for each of the aforementioned kernel pages. The whole + * of i460_pg_detail is equivalent to a giant GATT with page size equal to + * that of the kernel. 
+ */ + +static void *intel_i460_alloc_large_page(int pg_num) +{ + int i; + void *bp, *bp_end; + struct page *page; + + i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE); + if (i460_pg_detail[pg_num] == NULL) { + printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); + return NULL; + } + + for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) + i460_pg_detail[pg_num][i] = 0; + + bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT); + if (bp == NULL) { + printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); + return NULL; + } + + bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); + + for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { + atomic_inc(&agp_bridge.current_memory_agp); + } + return bp; +} + +static void intel_i460_free_large_page(int pg_num, unsigned long addr) +{ + struct page *page; + void *bp, *bp_end; + + bp = (void *) __va(addr); + bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))); + + vfree(i460_pg_detail[pg_num]); + i460_pg_detail[pg_num] = NULL; + + for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { + atomic_dec(&agp_bridge.current_memory_agp); + } + + free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); +} + +static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + if (end_pg > num_entries) { + printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); + return 
-EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (pg = start_pg; pg <= end_pg; pg++) { + /* Allocate new GART pages if necessary */ + if (i460_pg_detail[pg] == NULL) { + temp = intel_i460_alloc_large_page(pg); + if (temp == NULL) + return -ENOMEM; + agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp, + 0); + intel_i460_read_back(agp_bridge.gatt_table + pg); + } + + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++) + { + if (i460_pg_detail[pg][idx] != 0) + return -EBUSY; + } + } + +#if 0 + /* not necessary since 460 GART is operated in coherent mode... */ + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } +#endif + + for (pg = start_pg, i = 0; pg <= end_pg; pg++) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = paddr + (idx * PAGE_SIZE); + i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i], + mem->type); + i460_pg_count[pg]++; + } + } + + return 0; +} + +static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + + for (i = 0, pg = start_pg; pg <= end_pg; pg++) { + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? 
(end_offset + 1) : I460_KPAGES_PER_CPAGE); + idx++, i++) + { + mem->memory[i] = 0; + i460_pg_detail[pg][idx] = 0; + i460_pg_count[pg]--; + } + + /* Free GART pages if they are unused */ + if (i460_pg_count[pg] == 0) { + paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); + agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; + intel_i460_read_back(agp_bridge.gatt_table + pg); + intel_i460_free_large_page(pg, paddr); + } + } + return 0; +} + +/* Dummy routines to call the approriate {cpk,kpc} function */ + +static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_insert_memory_cpk(mem, pg_start, type); + else + return intel_i460_insert_memory_kpc(mem, pg_start, type); +} + +static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_remove_memory_cpk(mem, pg_start, type); + else + return intel_i460_remove_memory_kpc(mem, pg_start, type); +} + +/* + * If the kernel page size is smaller that the chipset page size, we don't + * want to allocate memory until we know where it is to be bound in the + * aperture (a multi-kernel-page alloc might fit inside of an already + * allocated GART page). Consequently, don't allocate or free anything + * if i460_cpk (meaning chipset pages per kernel page) isn't set. + * + * Let's just hope nobody counts on the allocated AGP memory being there + * before bind time (I don't think current drivers do)... + */ +static void * intel_i460_alloc_page(void) +{ + if (intel_i460_cpk) + return agp_generic_alloc_page(); + + /* Returning NULL would cause problems */ + /* AK: really dubious code. 
*/ + return (void *)~0UL; +} + +static void intel_i460_destroy_page(void *page) +{ + if (intel_i460_cpk) + agp_generic_destroy_page(page); +} + +static struct gatt_mask intel_i460_masks[] = +{ + { + mask: INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, + type: 0 + } +}; + +static unsigned long intel_i460_mask_memory(unsigned long addr, int type) +{ + /* Make sure the returned address is a valid GATT entry */ + return (agp_bridge.masks[0].mask + | (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); +} + +static unsigned long intel_i460_unmask_memory(unsigned long addr) +{ + /* Turn a GATT entry into a physical address */ + return ((addr & 0xffffff) << 12); +} + +static struct aper_size_info_8 intel_i460_sizes[3] = +{ + /* + * The 32GB aperture is only available with a 4M GART page size. + * Due to the dynamic GART page size, we can't figure out page_order + * or num_entries until runtime. + */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) +{ + agp_bridge.masks = intel_i460_masks; + agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 3; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_i460_configure; + agp_bridge.fetch_size = intel_i460_fetch_size; + agp_bridge.cleanup = intel_i460_cleanup; + agp_bridge.tlb_flush = intel_i460_tlb_flush; + agp_bridge.mask_memory = intel_i460_mask_memory; + agp_bridge.unmask_memory = intel_i460_unmask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = intel_i460_create_gatt_table; + agp_bridge.free_gatt_table = intel_i460_free_gatt_table; + agp_bridge.insert_memory = intel_i460_insert_memory; + agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + 
agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = intel_i460_alloc_page; + agp_bridge.agp_destroy_page = intel_i460_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + return 0; +} + diff -Nru a/drivers/char/agp/i810-agp.c b/drivers/char/agp/i810-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/i810-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,594 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. 
+ */ + +#include +#include +#include +#include +#include "agp.h" + +static struct aper_size_info_fixed intel_i810_sizes[] = +{ + {64, 16384, 4}, + /* The 32M mode still requires a 64k gatt */ + {32, 8192, 4} +}; + +#define AGP_DCACHE_MEMORY 1 +#define AGP_PHYS_MEMORY 2 + +static struct gatt_mask intel_i810_masks[] = +{ + {mask: I810_PTE_VALID, type: 0}, + {mask: (I810_PTE_VALID | I810_PTE_LOCAL), type: AGP_DCACHE_MEMORY}, + {mask: I810_PTE_VALID, type: 0} +}; + +static struct _intel_i810_private { + struct pci_dev *i810_dev; /* device one */ + volatile u8 *registers; + int num_dcache_entries; +} intel_i810_private; + +static int intel_i810_fetch_size(void) +{ + u32 smram_miscc; + struct aper_size_info_fixed *values; + + pci_read_config_dword(agp_bridge.dev, I810_SMRAM_MISCC, &smram_miscc); + values = A_SIZE_FIX(agp_bridge.aperture_sizes); + + if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { + printk(KERN_WARNING PFX "i810 is disabled\n"); + return 0; + } + if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + 1); + agp_bridge.aperture_size_idx = 1; + return values[1].size; + } else { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values); + agp_bridge.aperture_size_idx = 0; + return values[0].size; + } + + return 0; +} + +static int intel_i810_configure(void) +{ + struct aper_size_info_fixed *current_size; + u32 temp; + int i; + + current_size = A_SIZE_FIX(agp_bridge.current_size); + + pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); + temp &= 0xfff80000; + + intel_i810_private.registers = + (volatile u8 *) ioremap(temp, 128 * 4096); + + if ((INREG32(intel_i810_private.registers, I810_DRAM_CTL) + & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { + /* This will need to be dynamically assigned */ + printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); + intel_i810_private.num_dcache_entries = 1024; + } + 
pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, + agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + CACHE_FLUSH(); + + if (agp_bridge.needs_scratch_page == TRUE) { + for (i = 0; i < current_size->num_entries; i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + } + return 0; +} + +static void intel_i810_cleanup(void) +{ + OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL, 0); + iounmap((void *) intel_i810_private.registers); +} + +static void intel_i810_tlbflush(agp_memory * mem) +{ + return; +} + +static void intel_i810_agp_enable(u32 mode) +{ + return; +} + +static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start, + int type) +{ + int i, j, num_entries; + void *temp; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_FIX(temp)->num_entries; + + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + for (j = pg_start; j < (pg_start + mem->page_count); j++) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + } + + if (type != 0 || mem->type != 0) { + if ((type == AGP_DCACHE_MEMORY) && + (mem->type == AGP_DCACHE_MEMORY)) { + /* special insert */ + CACHE_FLUSH(); + for (i = pg_start; + i < (pg_start + mem->page_count); i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + (i * 4096) | I810_PTE_LOCAL | + I810_PTE_VALID); + } + CACHE_FLUSH(); + agp_bridge.tlb_flush(mem); + return 0; + } + if((type == AGP_PHYS_MEMORY) && + (mem->type == AGP_PHYS_MEMORY)) { + goto insert; + } + return -EINVAL; + } + +insert: + CACHE_FLUSH(); + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (j * 4), mem->memory[i]); + } + CACHE_FLUSH(); + + agp_bridge.tlb_flush(mem); + return 0; +} + +static int intel_i810_remove_entries(agp_memory 
* mem, off_t pg_start, + int type) +{ + int i; + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + OUTREG32(intel_i810_private.registers, + I810_PTE_BASE + (i * 4), + agp_bridge.scratch_page); + } + + CACHE_FLUSH(); + agp_bridge.tlb_flush(mem); + return 0; +} + +static agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) +{ + agp_memory *new; + + if (type == AGP_DCACHE_MEMORY) { + if (pg_count != intel_i810_private.num_dcache_entries) { + return NULL; + } + new = agp_create_memory(1); + + if (new == NULL) { + return NULL; + } + new->type = AGP_DCACHE_MEMORY; + new->page_count = pg_count; + new->num_scratch_pages = 0; + vfree(new->memory); + MOD_INC_USE_COUNT; + return new; + } + if(type == AGP_PHYS_MEMORY) { + void *addr; + /* The I810 requires a physical address to program + * it's mouse pointer into hardware. However the + * Xserver still writes to it through the agp + * aperture + */ + if (pg_count != 1) { + return NULL; + } + new = agp_create_memory(1); + + if (new == NULL) { + return NULL; + } + MOD_INC_USE_COUNT; + addr = agp_bridge.agp_alloc_page(); + + if (addr == NULL) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr), type); + new->page_count = 1; + new->num_scratch_pages = 1; + new->type = AGP_PHYS_MEMORY; + new->physical = virt_to_phys((void *) new->memory[0]); + return new; + } + + return NULL; +} + +static void intel_i810_free_by_type(agp_memory * curr) +{ + agp_free_key(curr->key); + if(curr->type == AGP_PHYS_MEMORY) { + agp_bridge.agp_destroy_page( + phys_to_virt(curr->memory[0])); + vfree(curr->memory); + } + kfree(curr); + MOD_DEC_USE_COUNT; +} + +static unsigned long intel_i810_mask_memory(unsigned long addr, int type) +{ + /* Type checking must be done elsewhere */ + return addr | agp_bridge.masks[type].mask; +} + +int __init intel_i810_setup(struct pci_dev *i810_dev) +{ + intel_i810_private.i810_dev = i810_dev; + + agp_bridge.masks = 
intel_i810_masks; + agp_bridge.num_of_masks = 2; + agp_bridge.aperture_sizes = (void *) intel_i810_sizes; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.num_aperture_sizes = 2; + agp_bridge.dev_private_data = (void *) &intel_i810_private; + agp_bridge.needs_scratch_page = TRUE; + agp_bridge.configure = intel_i810_configure; + agp_bridge.fetch_size = intel_i810_fetch_size; + agp_bridge.cleanup = intel_i810_cleanup; + agp_bridge.tlb_flush = intel_i810_tlbflush; + agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.agp_enable = intel_i810_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = intel_i810_insert_entries; + agp_bridge.remove_memory = intel_i810_remove_entries; + agp_bridge.alloc_by_type = intel_i810_alloc_by_type; + agp_bridge.free_by_type = intel_i810_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; +} + +static struct aper_size_info_fixed intel_i830_sizes[] = +{ + {128, 32768, 5}, + /* The 64M mode still requires a 128k gatt */ + {64, 16384, 5} +}; + +static struct _intel_i830_private { + struct pci_dev *i830_dev; /* device one */ + volatile u8 *registers; + int gtt_entries; +} intel_i830_private; + +static void intel_i830_init_gtt_entries(void) +{ + u16 gmch_ctrl; + int gtt_entries; + u8 rdct; + static const int ddt[4] = { 0, 16, 32, 64 }; + + pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); + + switch (gmch_ctrl & I830_GMCH_GMS_MASK) { + case I830_GMCH_GMS_STOLEN_512: + gtt_entries = KB(512) - KB(132); + printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); + break; + case I830_GMCH_GMS_STOLEN_1024: + gtt_entries = MB(1) - KB(132); + 
printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); + break; + case I830_GMCH_GMS_STOLEN_8192: + gtt_entries = MB(8) - KB(132); + printk(KERN_INFO PFX "detected %dK stolen memory.\n",gtt_entries / KB(1)); + break; + case I830_GMCH_GMS_LOCAL: + rdct = INREG8(intel_i830_private.registers,I830_RDRAM_CHANNEL_TYPE); + gtt_entries = (I830_RDRAM_ND(rdct) + 1) * MB(ddt[I830_RDRAM_DDT(rdct)]); + printk(KERN_INFO PFX "detected %dK local memory.\n",gtt_entries / KB(1)); + break; + default: + printk(KERN_INFO PFX "no video memory detected.\n"); + gtt_entries = 0; + break; + } + + gtt_entries /= KB(4); + + intel_i830_private.gtt_entries = gtt_entries; +} + +/* The intel i830 automatically initializes the agp aperture during POST. + * Use the memory already set aside for in the GTT. + */ +static int intel_i830_create_gatt_table(void) +{ + int page_order; + struct aper_size_info_fixed *size; + int num_entries; + u32 temp; + + size = agp_bridge.current_size; + page_order = size->page_order; + num_entries = size->num_entries; + agp_bridge.gatt_table_real = 0; + + pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp); + temp &= 0xfff80000; + + intel_i830_private.registers = (volatile u8 *) ioremap(temp,128 * 4096); + if (!intel_i830_private.registers) return (-ENOMEM); + + temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000; + CACHE_FLUSH(); + + /* we have to call this as early as possible after the MMIO base address is known */ + intel_i830_init_gtt_entries(); + + agp_bridge.gatt_table = NULL; + + agp_bridge.gatt_bus_addr = temp; + + return(0); +} + +/* Return the gatt table to a sane state. Use the top of stolen + * memory for the GTT. 
+ */ +static int intel_i830_free_gatt_table(void) +{ + return(0); +} + +static int intel_i830_fetch_size(void) +{ + u16 gmch_ctrl; + struct aper_size_info_fixed *values; + + pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); + values = A_SIZE_FIX(agp_bridge.aperture_sizes); + + if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) { + agp_bridge.previous_size = agp_bridge.current_size = (void *) values; + agp_bridge.aperture_size_idx = 0; + return(values[0].size); + } else { + agp_bridge.previous_size = agp_bridge.current_size = (void *) values; + agp_bridge.aperture_size_idx = 1; + return(values[1].size); + } + + return(0); +} + +static int intel_i830_configure(void) +{ + struct aper_size_info_fixed *current_size; + u32 temp; + u16 gmch_ctrl; + int i; + + current_size = A_SIZE_FIX(agp_bridge.current_size); + + pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_word(agp_bridge.dev,I830_GMCH_CTRL,&gmch_ctrl); + gmch_ctrl |= I830_GMCH_ENABLED; + pci_write_config_word(agp_bridge.dev,I830_GMCH_CTRL,gmch_ctrl); + + OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge.gatt_bus_addr | I810_PGETBL_ENABLED); + CACHE_FLUSH(); + + if (agp_bridge.needs_scratch_page == TRUE) + for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); + + return (0); +} + +static void intel_i830_cleanup(void) +{ + iounmap((void *) intel_i830_private.registers); +} + +static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type) +{ + int i,j,num_entries; + void *temp; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_FIX(temp)->num_entries; + + if (pg_start < intel_i830_private.gtt_entries) { + printk (KERN_DEBUG "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n", + pg_start,intel_i830_private.gtt_entries); + + printk 
("Trying to insert into local/stolen memory\n"); + return (-EINVAL); + } + + if ((pg_start + mem->page_count) > num_entries) + return (-EINVAL); + + /* The i830 can't check the GTT for entries since its read only, + * depend on the caller to make the correct offset decisions. + */ + + if ((type != 0 && type != AGP_PHYS_MEMORY) || + (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) + return (-EINVAL); + + CACHE_FLUSH(); + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]); + + CACHE_FLUSH(); + + agp_bridge.tlb_flush(mem); + + return(0); +} + +static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type) +{ + int i; + + CACHE_FLUSH (); + + if (pg_start < intel_i830_private.gtt_entries) { + printk ("Trying to disable local/stolen memory\n"); + return (-EINVAL); + } + + for (i = pg_start; i < (mem->page_count + pg_start); i++) + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge.scratch_page); + + CACHE_FLUSH(); + + agp_bridge.tlb_flush(mem); + + return (0); +} + +static agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type) +{ + agp_memory *nw; + + /* always return NULL for now */ + if (type == AGP_DCACHE_MEMORY) return(NULL); + + if (type == AGP_PHYS_MEMORY) { + void *addr; + + /* The i830 requires a physical address to program + * it's mouse pointer into hardware. 
However the + * Xserver still writes to it through the agp + * aperture + */ + + if (pg_count != 1) return(NULL); + + nw = agp_create_memory(1); + + if (nw == NULL) return(NULL); + + MOD_INC_USE_COUNT; + addr = agp_bridge.agp_alloc_page(); + if (addr == NULL) { + /* free this structure */ + agp_free_memory(nw); + return(NULL); + } + + nw->memory[0] = agp_bridge.mask_memory(virt_to_phys(addr),type); + nw->page_count = 1; + nw->num_scratch_pages = 1; + nw->type = AGP_PHYS_MEMORY; + nw->physical = virt_to_phys(addr); + return(nw); + } + + return(NULL); +} + +int __init intel_i830_setup(struct pci_dev *i830_dev) +{ + intel_i830_private.i830_dev = i830_dev; + + agp_bridge.masks = intel_i810_masks; + agp_bridge.num_of_masks = 3; + agp_bridge.aperture_sizes = (void *) intel_i830_sizes; + agp_bridge.size_type = FIXED_APER_SIZE; + agp_bridge.num_aperture_sizes = 2; + + agp_bridge.dev_private_data = (void *) &intel_i830_private; + agp_bridge.needs_scratch_page = TRUE; + + agp_bridge.configure = intel_i830_configure; + agp_bridge.fetch_size = intel_i830_fetch_size; + agp_bridge.cleanup = intel_i830_cleanup; + agp_bridge.tlb_flush = intel_i810_tlbflush; + agp_bridge.mask_memory = intel_i810_mask_memory; + agp_bridge.agp_enable = intel_i810_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + + agp_bridge.create_gatt_table = intel_i830_create_gatt_table; + agp_bridge.free_gatt_table = intel_i830_free_gatt_table; + + agp_bridge.insert_memory = intel_i830_insert_entries; + agp_bridge.remove_memory = intel_i830_remove_entries; + agp_bridge.alloc_by_type = intel_i830_alloc_by_type; + agp_bridge.free_by_type = intel_i810_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return(0); +} + diff -Nru a/drivers/char/agp/i8x0-agp.c b/drivers/char/agp/i8x0-agp.c --- /dev/null Wed 
Dec 31 16:00:00 1969 +++ b/drivers/char/agp/i8x0-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,726 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. 
+ */ + +#include +#include +#include +#include +#include "agp.h" + + +static int intel_fetch_size(void) +{ + int i; + u16 temp; + struct aper_size_info_16 *values; + + pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp); + values = A_SIZE_16(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int intel_8xx_fetch_size(void) +{ + int i; + u8 temp; + struct aper_size_info_8 *values; + + pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp); + + /* Intel 815 chipsets have a _weird_ APSIZE register with only + * one non-reserved bit, so mask the others out ... */ + if (agp_bridge.type == INTEL_I815) + temp &= (1 << 3); + + values = A_SIZE_8(agp_bridge.aperture_sizes); + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + return 0; +} + + +static void intel_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); +} + + +static void intel_8xx_tlbflush(agp_memory * mem) +{ + u32 temp; + pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7)); + pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7)); +} + + +static void intel_cleanup(void) +{ + u16 temp; + struct aper_size_info_16 *previous_size; + + previous_size = A_SIZE_16(agp_bridge.previous_size); + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & 
~(1 << 9)); + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + previous_size->size_value); +} + + +static void intel_8xx_cleanup(void) +{ + u16 temp; + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9)); + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + previous_size->size_value); +} + + +static int intel_configure(void) +{ + u32 temp; + u16 temp2; + struct aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_word(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280); + + /* paccfg/nbxcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, + (temp2 & ~(1 << 10)) | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7); + return 0; +} + +static int intel_815_configure(void) +{ + u32 temp, addr; + u8 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + /* the Intel 815 chipset spec. 
says that bits 29-31 in the + * ATTBASE register are reserved -> try not to write them */ + if (agp_bridge.gatt_bus_addr & INTEL_815_ATTBASE_MASK) + panic("gatt bus addr too high"); + pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr); + addr &= INTEL_815_ATTBASE_MASK; + addr |= agp_bridge.gatt_bus_addr; + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* apcont */ + pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2); + pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT, temp2 | (1 << 1)); + + /* clear any possible error conditions */ + /* Oddness : this chipset seems to have no ERRSTS register ! */ + return 0; +} + +static void intel_820_tlbflush(agp_memory * mem) +{ + return; +} + +static void intel_820_cleanup(void) +{ + u8 temp; + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, + temp & ~(1 << 1)); + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + previous_size->size_value); +} + + +static int intel_820_configure(void) +{ + u32 temp; + u8 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* global enable aperture access */ + /* This flag is not accessed through MCHCFG register as in */ + /* i850 chipset. 
*/ + pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2); + pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, + temp2 | (1 << 1)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); + return 0; +} + +static int intel_840_configure(void) +{ + u32 temp; + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, + temp2 | (1 << 9)); + /* clear any possible error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); + return 0; +} + +static int intel_845_configure(void) +{ + u32 temp; + u8 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* agpm */ + pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2); + pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM, + temp2 | (1 << 1)); + /* clear 
any possible error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); + return 0; +} + +static int intel_850_configure(void) +{ + u32 temp; + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, + temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); + return 0; +} + +static int intel_860_configure(void) +{ + u32 temp; + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* mcgcfg */ + pci_read_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_I860_MCHCFG, + temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I860_ERRSTS, 0xf700); + return 0; +} + +static int 
intel_830mp_configure(void) +{ + u32 temp; + u16 temp2; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase - aperture base */ + pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); + + /* gmch */ + pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2); + pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, + temp2 | (1 << 9)); + /* clear any possible AGP-related error conditions */ + pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c); + return 0; +} + +static unsigned long intel_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static void intel_resume(void) +{ + intel_configure(); +} + +/* Setup function */ +static struct gatt_mask intel_generic_masks[] = +{ + {mask: 0x00000017, type: 0} +}; + +static struct aper_size_info_8 intel_815_sizes[2] = +{ + {64, 16384, 4, 0}, + {32, 8192, 3, 8}, +}; + +static struct aper_size_info_8 intel_8xx_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static struct aper_size_info_16 intel_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56}, + {16, 4096, 2, 60}, + {8, 2048, 1, 62}, + {4, 1024, 0, 63} +}; + +static struct aper_size_info_8 intel_830mp_sizes[4] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 32}, + {64, 16384, 4, 48}, + {32, 8192, 3, 56} +}; + +int __init intel_generic_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = 
intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_generic_sizes; + agp_bridge.size_type = U16_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_configure; + agp_bridge.fetch_size = intel_fetch_size; + agp_bridge.cleanup = intel_cleanup; + agp_bridge.tlb_flush = intel_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = intel_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_815_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_815_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 2; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_815_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + 
agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; +} + + +int __init intel_820_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_820_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_820_cleanup; + agp_bridge.tlb_flush = intel_820_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_830mp_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; + agp_bridge.size_type = U8_APER_SIZE; + 
agp_bridge.num_aperture_sizes = 4; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_830mp_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_840_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_840_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = 
agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_845_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_845_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_850_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + 
agp_bridge.configure = intel_850_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + +int __init intel_860_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_860_configure; + agp_bridge.fetch_size = intel_8xx_fetch_size; + agp_bridge.cleanup = intel_8xx_cleanup; + agp_bridge.tlb_flush = intel_8xx_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = 
agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} + diff -Nru a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/sis-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,142 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. 
+ */ + +#include +#include +#include +#include +#include "agp.h" + +static int sis_fetch_size(void) +{ + u8 temp_size; + int i; + struct aper_size_info_8 *values; + + pci_read_config_byte(agp_bridge.dev, SIS_APSIZE, &temp_size); + values = A_SIZE_8(agp_bridge.aperture_sizes); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if ((temp_size == values[i].size_value) || + ((temp_size & ~(0x03)) == + (values[i].size_value & ~(0x03)))) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + + +static void sis_tlbflush(agp_memory * mem) +{ + pci_write_config_byte(agp_bridge.dev, SIS_TLBFLUSH, 0x02); +} + +static int sis_configure(void) +{ + u32 temp; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + pci_write_config_byte(agp_bridge.dev, SIS_TLBCNTRL, 0x05); + pci_read_config_dword(agp_bridge.dev, SIS_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + pci_write_config_dword(agp_bridge.dev, SIS_ATTBASE, + agp_bridge.gatt_bus_addr); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + current_size->size_value); + return 0; +} + +static void sis_cleanup(void) +{ + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, SIS_APSIZE, + (previous_size->size_value & ~(0x03))); +} + +static unsigned long sis_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static struct aper_size_info_8 sis_generic_sizes[7] = +{ + {256, 65536, 6, 99}, + {128, 32768, 5, 83}, + {64, 16384, 4, 67}, + {32, 8192, 3, 51}, + {16, 4096, 2, 35}, + {8, 2048, 1, 19}, + {4, 1024, 0, 3} +}; + +static struct gatt_mask sis_generic_masks[] = +{ + {mask: 0x00000000, type: 0} +}; + +int __init sis_generic_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = 
sis_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) sis_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = sis_configure; + agp_bridge.fetch_size = sis_fetch_size; + agp_bridge.cleanup = sis_cleanup; + agp_bridge.tlb_flush = sis_tlbflush; + agp_bridge.mask_memory = sis_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; +} + diff -Nru a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/sworks-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,626 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. 
+ */ + +#include +#include +#include +#include +#include "agp.h" + +struct serverworks_page_map { + unsigned long *real; + unsigned long *remapped; +}; + +static struct _serverworks_private { + struct pci_dev *svrwrks_dev; /* device one */ + volatile u8 *registers; + struct serverworks_page_map **gatt_pages; + int num_tables; + struct serverworks_page_map scratch_dir; + + int gart_addr_ofs; + int mm_addr_ofs; +} serverworks_private; + +static int serverworks_create_page_map(struct serverworks_page_map *page_map) +{ + int i; + + page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL); + if (page_map->real == NULL) { + return -ENOMEM; + } + SetPageReserved(virt_to_page(page_map->real)); + CACHE_FLUSH(); + page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real), + PAGE_SIZE); + if (page_map->remapped == NULL) { + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); + page_map->real = NULL; + return -ENOMEM; + } + CACHE_FLUSH(); + + for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { + page_map->remapped[i] = agp_bridge.scratch_page; + } + + return 0; +} + +static void serverworks_free_page_map(struct serverworks_page_map *page_map) +{ + iounmap(page_map->remapped); + ClearPageReserved(virt_to_page(page_map->real)); + free_page((unsigned long) page_map->real); +} + +static void serverworks_free_gatt_pages(void) +{ + int i; + struct serverworks_page_map **tables; + struct serverworks_page_map *entry; + + tables = serverworks_private.gatt_pages; + for(i = 0; i < serverworks_private.num_tables; i++) { + entry = tables[i]; + if (entry != NULL) { + if (entry->real != NULL) { + serverworks_free_page_map(entry); + } + kfree(entry); + } + } + kfree(tables); +} + +static int serverworks_create_gatt_pages(int nr_tables) +{ + struct serverworks_page_map **tables; + struct serverworks_page_map *entry; + int retval = 0; + int i; + + tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), + GFP_KERNEL); 
+ if (tables == NULL) { + return -ENOMEM; + } + memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1)); + for (i = 0; i < nr_tables; i++) { + entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL); + if (entry == NULL) { + retval = -ENOMEM; + break; + } + memset(entry, 0, sizeof(struct serverworks_page_map)); + tables[i] = entry; + retval = serverworks_create_page_map(entry); + if (retval != 0) break; + } + serverworks_private.num_tables = nr_tables; + serverworks_private.gatt_pages = tables; + + if (retval != 0) serverworks_free_gatt_pages(); + + return retval; +} + +#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\ + GET_PAGE_DIR_IDX(addr)]->remapped) + +#ifndef GET_PAGE_DIR_OFF +#define GET_PAGE_DIR_OFF(addr) (addr >> 22) +#endif + +#ifndef GET_PAGE_DIR_IDX +#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \ + GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr)) +#endif + +#ifndef GET_GATT_OFF +#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) +#endif + +static int serverworks_create_gatt_table(void) +{ + struct aper_size_info_lvl2 *value; + struct serverworks_page_map page_dir; + int retval; + u32 temp; + int i; + + value = A_SIZE_LVL2(agp_bridge.current_size); + retval = serverworks_create_page_map(&page_dir); + if (retval != 0) { + return retval; + } + retval = serverworks_create_page_map(&serverworks_private.scratch_dir); + if (retval != 0) { + serverworks_free_page_map(&page_dir); + return retval; + } + /* Create a fake scratch directory */ + for(i = 0; i < 1024; i++) { + serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page; + page_dir.remapped[i] = + virt_to_phys(serverworks_private.scratch_dir.real); + page_dir.remapped[i] |= 0x00000001; + } + + retval = serverworks_create_gatt_pages(value->num_entries / 1024); + if (retval != 0) { + serverworks_free_page_map(&page_dir); + serverworks_free_page_map(&serverworks_private.scratch_dir); + return retval; + } + + 
agp_bridge.gatt_table_real = page_dir.real; + agp_bridge.gatt_table = page_dir.remapped; + agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); + + /* Get the address for the gart region. + * This is a bus address even on the alpha, b/c its + * used to program the agp master not the cpu + */ + + pci_read_config_dword(agp_bridge.dev, + serverworks_private.gart_addr_ofs, + &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* Calculate the agp offset */ + + for(i = 0; i < value->num_entries / 1024; i++) { + page_dir.remapped[i] = + virt_to_phys(serverworks_private.gatt_pages[i]->real); + page_dir.remapped[i] |= 0x00000001; + } + + return 0; +} + +static int serverworks_free_gatt_table(void) +{ + struct serverworks_page_map page_dir; + + page_dir.real = agp_bridge.gatt_table_real; + page_dir.remapped = agp_bridge.gatt_table; + + serverworks_free_gatt_pages(); + serverworks_free_page_map(&page_dir); + serverworks_free_page_map(&serverworks_private.scratch_dir); + return 0; +} + +static int serverworks_fetch_size(void) +{ + int i; + u32 temp; + u32 temp2; + struct aper_size_info_lvl2 *values; + + values = A_SIZE_LVL2(agp_bridge.aperture_sizes); + pci_read_config_dword(agp_bridge.dev, + serverworks_private.gart_addr_ofs, + &temp); + pci_write_config_dword(agp_bridge.dev, + serverworks_private.gart_addr_ofs, + SVWRKS_SIZE_MASK); + pci_read_config_dword(agp_bridge.dev, + serverworks_private.gart_addr_ofs, + &temp2); + pci_write_config_dword(agp_bridge.dev, + serverworks_private.gart_addr_ofs, + temp); + temp2 &= SVWRKS_SIZE_MASK; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp2 == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int serverworks_configure(void) +{ + struct aper_size_info_lvl2 *current_size; + u32 temp; + u8 enable_reg; + u8 cap_ptr; + u32 cap_id; + u16 
cap_reg; + + current_size = A_SIZE_LVL2(agp_bridge.current_size); + + /* Get the memory mapped registers */ + pci_read_config_dword(agp_bridge.dev, + serverworks_private.mm_addr_ofs, + &temp); + temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); + serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096); + + OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a); + + OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE, + agp_bridge.gatt_bus_addr); + + cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND); + cap_reg &= ~0x0007; + cap_reg |= 0x4; + OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg); + + pci_read_config_byte(serverworks_private.svrwrks_dev, + SVWRKS_AGP_ENABLE, &enable_reg); + enable_reg |= 0x1; /* Agp Enable bit */ + pci_write_config_byte(serverworks_private.svrwrks_dev, + SVWRKS_AGP_ENABLE, enable_reg); + agp_bridge.tlb_flush(NULL); + + pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr); + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(serverworks_private.svrwrks_dev, + cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + agp_bridge.capndx = cap_ptr; + + /* Fill in the mode register */ + pci_read_config_dword(serverworks_private.svrwrks_dev, + agp_bridge.capndx + 4, + &agp_bridge.mode); + + pci_read_config_byte(agp_bridge.dev, + SVWRKS_CACHING, + &enable_reg); + enable_reg &= ~0x3; + pci_write_config_byte(agp_bridge.dev, + SVWRKS_CACHING, + enable_reg); + + pci_read_config_byte(agp_bridge.dev, + SVWRKS_FEATURE, + &enable_reg); + enable_reg |= (1<<6); + pci_write_config_byte(agp_bridge.dev, + SVWRKS_FEATURE, + enable_reg); + + return 0; +} + +static void serverworks_cleanup(void) +{ + iounmap((void *) serverworks_private.registers); +} + +/* + * This routine could be implemented by taking the addresses + * written to the GATT, and flushing them individually. 
However + currently it just flushes the whole table. Which is probably + more efficient, since agp_memory blocks can be a large number of + entries. + */ + +static void serverworks_tlbflush(agp_memory * temp) +{ + unsigned long end; + + OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01); + end = jiffies + 3*HZ; + while(INREG8(serverworks_private.registers, + SVWRKS_POSTFLUSH) == 0x01) { + if((signed)(end - jiffies) <= 0) { + printk(KERN_ERR "Posted write buffer flush took more" + "then 3 seconds\n"); + } + } + OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001); + end = jiffies + 3*HZ; + while(INREG32(serverworks_private.registers, + SVWRKS_DIRFLUSH) == 0x00000001) { + if((signed)(end - jiffies) <= 0) { + printk(KERN_ERR "TLB flush took more" + "then 3 seconds\n"); + } + } +} + +static unsigned long serverworks_mask_memory(unsigned long addr, int type) +{ + /* Only type 0 is supported by the serverworks chipsets */ + + return addr | agp_bridge.masks[0].mask; +} + +static int serverworks_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, num_entries; + unsigned long *cur_gatt; + unsigned long addr; + + num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + if ((pg_start + mem->page_count) > num_entries) { + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + mem->page_count)) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) { + return -EBUSY; + } + j++; + } + + if (mem->is_flushed == FALSE) { + CACHE_FLUSH(); + mem->is_flushed = TRUE; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { + addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + } + agp_bridge.tlb_flush(mem); + return 0; +} + +static int serverworks_remove_memory(agp_memory * 
mem, off_t pg_start, + int type) +{ + int i; + unsigned long *cur_gatt; + unsigned long addr; + + if (type != 0 || mem->type != 0) { + return -EINVAL; + } + + CACHE_FLUSH(); + agp_bridge.tlb_flush(mem); + + for (i = pg_start; i < (mem->page_count + pg_start); i++) { + addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr; + cur_gatt = SVRWRKS_GET_GATT(addr); + cur_gatt[GET_GATT_OFF(addr)] = + (unsigned long) agp_bridge.scratch_page; + } + + agp_bridge.tlb_flush(mem); + return 0; +} + +static struct gatt_mask serverworks_masks[] = +{ + {mask: 0x00000001, type: 0} +}; + +static struct aper_size_info_lvl2 serverworks_sizes[7] = +{ + {2048, 524288, 0x80000000}, + {1024, 262144, 0xc0000000}, + {512, 131072, 0xe0000000}, + {256, 65536, 0xf0000000}, + {128, 32768, 0xf8000000}, + {64, 16384, 0xfc000000}, + {32, 8192, 0xfe000000} +}; + +static void serverworks_agp_enable(u32 mode) +{ + struct pci_dev *device = NULL; + u32 command, scratch, cap_id; + u8 cap_ptr; + + pci_read_config_dword(serverworks_private.svrwrks_dev, + agp_bridge.capndx + 4, + &command); + + /* + * PASS1: go through all devices that claim to be + * AGP devices and collect their data. + */ + + + pci_for_each_dev(device) { + cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); + if (cap_ptr != 0x00) { + do { + pci_read_config_dword(device, + cap_ptr, &cap_id); + + if ((cap_id & 0xff) != 0x02) + cap_ptr = (cap_id >> 8) & 0xff; + } + while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); + } + if (cap_ptr != 0x00) { + /* + * Ok, here we have an AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. 
+ */ + + pci_read_config_dword(device, cap_ptr + 4, &scratch); + + /* adjust RQ depth */ + command = + ((command & ~0xff000000) | + min_t(u32, (mode & 0xff000000), + min_t(u32, (command & 0xff000000), + (scratch & 0xff000000)))); + + /* disable SBA if it's not supported */ + if (!((command & 0x00000200) && + (scratch & 0x00000200) && + (mode & 0x00000200))) + command &= ~0x00000200; + + /* disable FW */ + command &= ~0x00000010; + + command &= ~0x00000008; + + if (!((command & 4) && + (scratch & 4) && + (mode & 4))) + command &= ~0x00000004; + + if (!((command & 2) && + (scratch & 2) && + (mode & 2))) + command &= ~0x00000002; + + if (!((command & 1) && + (scratch & 1) && + (mode & 1))) + command &= ~0x00000001; + } + } + /* + * PASS2: Figure out the 4X/2X/1X setting and enable the + * target (our motherboard chipset). + */ + + if (command & 4) { + command &= ~3; /* 4X */ + } + if (command & 2) { + command &= ~5; /* 2X */ + } + if (command & 1) { + command &= ~6; /* 1X */ + } + command |= 0x00000100; + + pci_write_config_dword(serverworks_private.svrwrks_dev, + agp_bridge.capndx + 8, + command); + + /* + * PASS3: Go through all AGP devices and update the + * command registers. 
+ */ + + pci_for_each_dev(device) { + cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); + if (cap_ptr != 0x00) + pci_write_config_dword(device, cap_ptr + 8, command); + } +} + +int __init serverworks_setup (struct pci_dev *pdev) +{ + u32 temp; + u32 temp2; + + serverworks_private.svrwrks_dev = pdev; + + agp_bridge.masks = serverworks_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) serverworks_sizes; + agp_bridge.size_type = LVL2_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = (void *) &serverworks_private; + agp_bridge.needs_scratch_page = TRUE; + agp_bridge.configure = serverworks_configure; + agp_bridge.fetch_size = serverworks_fetch_size; + agp_bridge.cleanup = serverworks_cleanup; + agp_bridge.tlb_flush = serverworks_tlbflush; + agp_bridge.mask_memory = serverworks_mask_memory; + agp_bridge.agp_enable = serverworks_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = serverworks_create_gatt_table; + agp_bridge.free_gatt_table = serverworks_free_gatt_table; + agp_bridge.insert_memory = serverworks_insert_memory; + agp_bridge.remove_memory = serverworks_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + pci_read_config_dword(agp_bridge.dev, + SVWRKS_APSIZE, + &temp); + + serverworks_private.gart_addr_ofs = 0x10; + + if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_read_config_dword(agp_bridge.dev, + SVWRKS_APSIZE + 4, + &temp2); + if(temp2 != 0) { + printk("Detected 64 bit aperture address, but top " + "bits are not zero. 
Disabling agp\n"); + return -ENODEV; + } + serverworks_private.mm_addr_ofs = 0x18; + } else { + serverworks_private.mm_addr_ofs = 0x14; + } + + pci_read_config_dword(agp_bridge.dev, + serverworks_private.mm_addr_ofs, + &temp); + if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_read_config_dword(agp_bridge.dev, + serverworks_private.mm_addr_ofs + 4, + &temp2); + if(temp2 != 0) { + printk("Detected 64 bit MMIO address, but top " + "bits are not zero. Disabling agp\n"); + return -ENODEV; + } + } + + return 0; +} + diff -Nru a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/drivers/char/agp/via-agp.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,151 @@ +/* + * AGPGART module version 0.99 + * Copyright (C) 1999 Jeff Hartmann + * Copyright (C) 1999 Precision Insight, Inc. + * Copyright (C) 1999 Xi Graphics, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * TODO: + * - Allocate more than order 0 pages to avoid too much linear map splitting. + */ +#include +#include +#include +#include +#include +#include +#include "agp.h" + + +static int via_fetch_size(void) +{ + int i; + u8 temp; + struct aper_size_info_8 *values; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + pci_read_config_byte(agp_bridge.dev, VIA_APSIZE, &temp); + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + if (temp == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +static int via_configure(void) +{ + u32 temp; + struct aper_size_info_8 *current_size; + + current_size = A_SIZE_8(agp_bridge.current_size); + /* aperture size */ + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + current_size->size_value); + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* GART control register */ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); + + /* attbase - aperture GATT base */ + pci_write_config_dword(agp_bridge.dev, VIA_ATTBASE, + (agp_bridge.gatt_bus_addr & 0xfffff000) | 3); + return 0; +} + +static void via_cleanup(void) +{ + struct aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, + previous_size->size_value); + /* Do not disable by writing 0 to VIA_ATTBASE, it screws things up + * during reinitialization. 
+ */ +} + +static void via_tlbflush(agp_memory * mem) +{ + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000008f); + pci_write_config_dword(agp_bridge.dev, VIA_GARTCTRL, 0x0000000f); +} + +static unsigned long via_mask_memory(unsigned long addr, int type) +{ + /* Memory type is ignored */ + + return addr | agp_bridge.masks[0].mask; +} + +static struct aper_size_info_8 via_generic_sizes[7] = +{ + {256, 65536, 6, 0}, + {128, 32768, 5, 128}, + {64, 16384, 4, 192}, + {32, 8192, 3, 224}, + {16, 4096, 2, 240}, + {8, 2048, 1, 248}, + {4, 1024, 0, 252} +}; + +static struct gatt_mask via_generic_masks[] = +{ + {mask: 0x00000000, type: 0} +}; + +int __init via_generic_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = via_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) via_generic_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = via_configure; + agp_bridge.fetch_size = via_fetch_size; + agp_bridge.cleanup = via_cleanup; + agp_bridge.tlb_flush = via_tlbflush; + agp_bridge.mask_memory = via_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + + return 0; + + (void) pdev; /* unused */ +} diff -Nru a/drivers/ide/Config.help b/drivers/ide/Config.help --- 
a/drivers/ide/Config.help Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/Config.help Sat Jul 20 12:12:35 2002 @@ -708,7 +708,14 @@ Digital drives in the Expert series (by nature of really being IBM drives). - If you have such a drive, say Y here. + However, please, note that there are host chip controllers which will + not cooperate properly if TCQ is enabled. This may cause serious + data loss! + + Since enabling TCQ doesn't appear to have any noticeable performance + impact on Linux: [feel free to correct me if you wish too please] + + Generally say N here. CONFIG_BLK_DEV_IDE_TCQ_DEPTH Maximum size of commands to enable per-drive. Any value between 1 diff -Nru a/drivers/ide/Config.in b/drivers/ide/Config.in --- a/drivers/ide/Config.in Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/Config.in Sat Jul 20 12:12:35 2002 @@ -34,7 +34,7 @@ dep_bool ' Use PCI DMA by default when available' CONFIG_IDEDMA_PCI_AUTO $CONFIG_BLK_DEV_IDEDMA_PCI dep_bool ' Enable DMA only for disks ' CONFIG_IDEDMA_ONLYDISK $CONFIG_IDEDMA_PCI_AUTO define_bool CONFIG_BLK_DEV_IDEDMA $CONFIG_BLK_DEV_IDEDMA_PCI - dep_bool ' ATA tagged command queueing (EXPERIMENTAL)' CONFIG_BLK_DEV_IDE_TCQ $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_EXPERIMENTAL + dep_bool ' ATA tagged command queueing (DANGEROUS)' CONFIG_BLK_DEV_IDE_TCQ $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_EXPERIMENTAL dep_bool ' TCQ on by default' CONFIG_BLK_DEV_IDE_TCQ_DEFAULT $CONFIG_BLK_DEV_IDE_TCQ if [ "$CONFIG_BLK_DEV_IDE_TCQ" != "n" ]; then int ' Default queue depth' CONFIG_BLK_DEV_IDE_TCQ_DEPTH 32 diff -Nru a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c --- a/drivers/ide/aec62xx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/aec62xx.c Sat Jul 20 12:12:35 2002 @@ -61,22 +61,25 @@ #define AEC_CABLEPINS_INPUT 0x10 static unsigned char aec_cyc2udma[9] = { 5, 5, 5, 4, 3, 2, 2, 1, 1 }; -static unsigned char aec_cyc2act[16] = { 1, 1, 2, 3, 4, 5, 6, 0, 0, 7, 7, 7, 7, 7, 7, 7 }; -static unsigned char aec_cyc2rec[16] = { 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 12, 13, 
14 }; +static unsigned char aec_cyc2act[16] = + { 1, 1, 2, 3, 4, 5, 6, 0, 0, 7, 7, 7, 7, 7, 7, 7 }; +static unsigned char aec_cyc2rec[16] = + { 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 12, 13, 14 }; /* * aec_set_speed_old() writes timing values to * the chipset registers for ATP850UF */ -static void aec_set_speed_old(struct pci_dev *dev, unsigned char dn, struct ata_timing *timing) +static void aec_set_speed_old(struct pci_dev *dev, unsigned char dn, + struct ata_timing *timing) { unsigned char t; pci_write_config_byte(dev, AEC_DRIVE_TIMING + (dn << 1), - aec_cyc2act[FIT(timing->active, 0, 15)]); + aec_cyc2act[FIT(timing->active, 0, 15)]); pci_write_config_byte(dev, AEC_DRIVE_TIMING + (dn << 1) + 1, - aec_cyc2rec[FIT(timing->recover, 0, 15)]); + aec_cyc2rec[FIT(timing->recover, 0, 15)]); pci_read_config_byte(dev, AEC_UDMA_OLD, &t); t &= ~(3 << (dn << 1)); @@ -90,19 +93,22 @@ * other Artop chips */ -static void aec_set_speed_new(struct pci_dev *dev, unsigned char dn, struct ata_timing *timing) +static void aec_set_speed_new(struct pci_dev *dev, unsigned char dn, + struct ata_timing *timing) { unsigned char t; pci_write_config_byte(dev, AEC_DRIVE_TIMING + dn, - (aec_cyc2act[FIT(timing->active, 0, 15)] << 4) - | aec_cyc2rec[FIT(timing->recover, 0, 15)]); + (aec_cyc2act[FIT(timing->active, 0, 15)] << + 4) + | aec_cyc2rec[FIT(timing->recover, 0, 15)]); pci_read_config_byte(dev, AEC_UDMA_NEW + (dn >> 1), &t); t &= ~(0xf << ((dn & 1) << 2)); if (timing->udma) { if (timing->udma >= 2) - t |= aec_cyc2udma[FIT(timing->udma, 2, 8)] << ((dn & 1) << 2); + t |= aec_cyc2udma[FIT(timing->udma, 2, 8)] << + ((dn & 1) << 2); if (timing->mode == XFER_UDMA_5) t |= 6; if (timing->mode == XFER_UDMA_6) @@ -123,12 +129,15 @@ int T, UT; int aec_old; - aec_old = (drive->channel->pci_dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF); + aec_old = + (drive->channel->pci_dev->device == + PCI_DEVICE_ID_ARTOP_ATP850UF); if (speed != XFER_PIO_SLOW && speed != drive->current_speed) if 
(ide_config_drive_speed(drive, speed)) - printk(KERN_WARNING "ide%d: Drive %d didn't accept speed setting. Oh, well.\n", - drive->dn >> 1, drive->dn & 1); + printk(KERN_WARNING + "ide%d: Drive %d didn't accept speed setting. Oh, well.\n", + drive->dn >> 1, drive->dn & 1); T = 1000000000 / system_bus_speed; UT = T / (aec_old ? 1 : 2); @@ -152,7 +161,9 @@ static void aec62xx_tune_drive(struct ata_device *drive, unsigned char pio) { if (pio == 255) { - aec_set_drive(drive, ata_timing_mode(drive, XFER_PIO | XFER_EPIO)); + aec_set_drive(drive, + ata_timing_mode(drive, + XFER_PIO | XFER_EPIO)); return; } @@ -169,14 +180,17 @@ if (ch->udma_four) switch (ch->pci_dev->device) { - case PCI_DEVICE_ID_ARTOP_ATP865R: - case PCI_DEVICE_ID_ARTOP_ATP865: - /* Can't use these modes simultaneously, - based on which PLL clock was chosen. */ - map |= inb (bmide + AEC_BM_STAT_PCH) & AEC_PLLCLK_ATA133 ? XFER_UDMA_133 : XFER_UDMA_100; - case PCI_DEVICE_ID_ARTOP_ATP860R: - case PCI_DEVICE_ID_ARTOP_ATP860: - map |= XFER_UDMA_66; + case PCI_DEVICE_ID_ARTOP_ATP865R: + case PCI_DEVICE_ID_ARTOP_ATP865: + /* Can't use these modes simultaneously, + based on which PLL clock was chosen. */ + map |= + inb(bmide + + AEC_BM_STAT_PCH) & AEC_PLLCLK_ATA133 ? + XFER_UDMA_133 : XFER_UDMA_100; + case PCI_DEVICE_ID_ARTOP_ATP860R: + case PCI_DEVICE_ID_ARTOP_ATP860: + map |= XFER_UDMA_66; } return map; @@ -200,27 +214,28 @@ switch (dev->device) { - case PCI_DEVICE_ID_ARTOP_ATP865R: - case PCI_DEVICE_ID_ARTOP_ATP865: + case PCI_DEVICE_ID_ARTOP_ATP865R: + case PCI_DEVICE_ID_ARTOP_ATP865: - /* Clear reset and test bits. */ - pci_read_config_byte(dev, AEC_MISC, &t); - pci_write_config_byte(dev, AEC_MISC, t & ~0x30); - - /* Enable chip interrupt output. */ - pci_read_config_byte(dev, AEC_IDE_ENABLE, &t); - pci_write_config_byte(dev, AEC_IDE_ENABLE, t & ~0x01); + /* Clear reset and test bits. 
*/ + pci_read_config_byte(dev, AEC_MISC, &t); + pci_write_config_byte(dev, AEC_MISC, t & ~0x30); + + /* Enable chip interrupt output. */ + pci_read_config_byte(dev, AEC_IDE_ENABLE, &t); + pci_write_config_byte(dev, AEC_IDE_ENABLE, t & ~0x01); #ifdef CONFIG_AEC6280_BURST - /* Must be greater than 0x80 for burst mode. */ - pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x90); + /* Must be greater than 0x80 for burst mode. */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x90); - /* Enable burst mode. */ - pci_read_config_byte(dev, AEC_IDE_ENABLE, &t); - pci_write_config_byte(dev, AEC_IDE_ENABLE, t | 0x80); + /* Enable burst mode. */ + pci_read_config_byte(dev, AEC_IDE_ENABLE, &t); + pci_write_config_byte(dev, AEC_IDE_ENABLE, t | 0x80); #endif - /* switch cable detection pins to input-only. */ - outb (inb (bmide + AEC_BM_STAT_SCH) | AEC_CABLEPINS_INPUT, bmide + AEC_BM_STAT_SCH); + /* switch cable detection pins to input-only. */ + outb(inb(bmide + AEC_BM_STAT_SCH) | AEC_CABLEPINS_INPUT, + bmide + AEC_BM_STAT_SCH); } /* @@ -229,7 +244,7 @@ pci_read_config_byte(dev, PCI_REVISION_ID, &t); printk(KERN_INFO "AEC_IDE: %s (rev %02x) controller on pci%s\n", - dev->name, t, dev->slot_name); + dev->name, t, dev->slot_name); return dev->irq; } @@ -274,7 +289,8 @@ /* * We allow the BM-DMA driver only work on enabled interfaces. 
*/ -static void __init aec62xx_init_dma(struct ata_channel *ch, unsigned long dmabase) +static void __init aec62xx_init_dma(struct ata_channel *ch, + unsigned long dmabase) { unsigned char t; @@ -286,50 +302,49 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_ARTOP, - device: PCI_DEVICE_ID_ARTOP_ATP850UF, - init_chipset: aec62xx_init_chipset, - init_channel: aec62xx_init_channel, - init_dma: aec62xx_init_dma, - enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: OFF_BOARD, - flags: ATA_F_SER | ATA_F_IRQ | ATA_F_DMA + .vendor = PCI_VENDOR_ID_ARTOP, + .device = PCI_DEVICE_ID_ARTOP_ATP850UF, + .init_chipset = aec62xx_init_chipset, + .init_channel = aec62xx_init_channel, + .init_dma = aec62xx_init_dma, + .enablebits = {{0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04}}, + .bootable = OFF_BOARD, + .flags = ATA_F_SER | ATA_F_IRQ | ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_ARTOP, - device: PCI_DEVICE_ID_ARTOP_ATP860, - init_chipset: aec62xx_init_chipset, - init_channel: aec62xx_init_channel, - enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: NEVER_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA + .vendor = PCI_VENDOR_ID_ARTOP, + .device = PCI_DEVICE_ID_ARTOP_ATP860, + .init_chipset = aec62xx_init_chipset, + .init_channel = aec62xx_init_channel, + .enablebits = {{0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04}}, + .bootable = NEVER_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_ARTOP, - device: PCI_DEVICE_ID_ARTOP_ATP860R, - init_chipset: aec62xx_init_chipset, - init_channel: aec62xx_init_channel, - enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, + .vendor = PCI_VENDOR_ID_ARTOP, + .device = PCI_DEVICE_ID_ARTOP_ATP860R, + .init_chipset = aec62xx_init_chipset, + .init_channel = aec62xx_init_channel, + .enablebits = {{0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04}}, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, { - vendor: 
PCI_VENDOR_ID_ARTOP, - device: PCI_DEVICE_ID_ARTOP_ATP865, - init_chipset: aec62xx_init_chipset, - init_channel: aec62xx_init_channel, - enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: NEVER_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA + .vendor = PCI_VENDOR_ID_ARTOP, + .device = PCI_DEVICE_ID_ARTOP_ATP865, + .init_chipset = aec62xx_init_chipset, + .init_channel = aec62xx_init_channel, + .enablebits = {{0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04}}, + .bootable = NEVER_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_ARTOP, - device: PCI_DEVICE_ID_ARTOP_ATP865R, - init_chipset: aec62xx_init_chipset, - init_channel: aec62xx_init_channel, - enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA + .vendor = PCI_VENDOR_ID_ARTOP, + .device = PCI_DEVICE_ID_ARTOP_ATP865R, + .init_chipset = aec62xx_init_chipset, + .init_channel = aec62xx_init_channel, + .enablebits = {{0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04}}, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA } }; diff -Nru a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c --- a/drivers/ide/ali14xx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ali14xx.c Sat Jul 20 12:12:35 2002 @@ -46,11 +46,12 @@ /* port addresses for auto-detection */ #define ALI_NUM_PORTS 4 -static int ports[ALI_NUM_PORTS] __initdata = { 0x074, 0x0f4, 0x034, 0x0e4 }; +static int ports[ALI_NUM_PORTS] __initdata = + { 0x074, 0x0f4, 0x034, 0x0e4 }; /* register initialization data */ struct reg_initializer { - u8 reg, data; + u8 reg, data; }; static struct reg_initializer init_data[] __initdata = { @@ -67,17 +68,21 @@ static struct { u8 reg1, reg2, reg3, reg4; } reg_tab[4] = { - { 0x03, 0x26, 0x04, 0x27 }, /* drive 0 */ - { 0x05, 0x28, 0x06, 0x29 }, /* drive 1 */ - { 0x2b, 0x30, 0x2c, 0x31 }, /* drive 2 */ - { 0x2d, 0x32, 0x2e, 0x33 }, /* drive 3 */ + { + 0x03, 0x26, 0x04, 0x27}, /* drive 0 */ + { + 0x05, 0x28, 0x06, 0x29}, /* drive 1 */ + { + 0x2b, 0x30, 0x2c, 0x31}, /* drive 2 */ 
+ { + 0x2d, 0x32, 0x2e, 0x33}, /* drive 3 */ }; -static int base_port; /* base port address */ -static int reg_port; /* port for register number */ -static int data_port; /* port for register data */ -static u8 reg_on; /* output to base port to access registers */ -static u8 reg_off; /* output to base port to close registers */ +static int base_port; /* base port address */ +static int reg_port; /* port for register number */ +static int data_port; /* port for register data */ +static u8 reg_on; /* output to base port to access registers */ +static u8 reg_off; /* output to base port to close registers */ /* * Read a controller register. @@ -121,13 +126,16 @@ time1 = t->cycle; time2 = t->active; param3 = param1 = (time2 * system_bus_speed + 999999) / 1000000; - param4 = param2 = (time1 * system_bus_speed + 999999) / 1000000 - param1; + param4 = param2 = + (time1 * system_bus_speed + 999999) / 1000000 - param1; if (pio < XFER_PIO_3) { param3 += 8; param4 += 8; } - printk(KERN_DEBUG "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n", - drive->name, pio - XFER_PIO_0, time1, time2, param1, param2, param3, param4); + printk(KERN_DEBUG + "%s: PIO mode%d, t1=%dns, t2=%dns, cycles = %d+%d, %d+%d\n", + drive->name, pio - XFER_PIO_0, time1, time2, param1, param2, + param3, param4); /* stuff timing parameters into controller registers */ drive_num = (drive->channel->index << 1) + drive->select.b.unit; @@ -150,8 +158,7 @@ int i; unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); for (i = 0; i < ALI_NUM_PORTS; i++) { base_port = ports[i]; reg_off = inb(base_port); @@ -163,7 +170,7 @@ data_port = base_port + 8; t = in_reg(0) & 0xf0; outb_p(reg_off, base_port); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); if (t != 0x50) return 0; return 1; /* success */ @@ -171,7 +178,8 @@ } outb_p(reg_off, base_port); } - __restore_flags(flags); /* local CPU only */ + 
local_irq_restore(flags); + return 0; } @@ -184,15 +192,15 @@ unsigned long flags; u8 t; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); outb_p(reg_on, base_port); for (p = init_data; p->reg != 0; ++p) out_reg(p->data, p->reg); outb_p(0x01, reg_port); t = inb(reg_port) & 0x01; outb_p(reg_off, base_port); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); + return t; } @@ -205,7 +213,7 @@ } printk(KERN_DEBUG "ali14xx: base=%#03x, reg_on=%#02x\n", - base_port, reg_on); + base_port, reg_on); ide_hwifs[0].chipset = ide_ali14xx; ide_hwifs[1].chipset = ide_ali14xx; ide_hwifs[0].tuneproc = &ali14xx_tune_drive; diff -Nru a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c --- a/drivers/ide/alim15x3.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/alim15x3.c Sat Jul 20 12:12:35 2002 @@ -73,10 +73,10 @@ if (r_clc >= 16) r_clc = 0; } - __save_flags(flags); - __cli(); - - /* + + local_irq_save(flags); + + /* * PIO mode => ATA FIFO on, ATAPI FIFO off */ pci_read_config_byte(dev, portFIFO, &cd_dma_fifo); @@ -96,7 +96,8 @@ pci_write_config_byte(dev, port, s_clc); pci_write_config_byte(dev, port+drive->select.b.unit+2, (a_clc << 4) | r_clc); - __restore_flags(flags); + + local_irq_restore(flags); } static int ali15x3_tune_chipset(struct ata_device *drive, byte speed) @@ -216,8 +217,7 @@ unsigned long flags; byte tmpbyte; - __save_flags(flags); - __cli(); + local_irq_save(flags); if (m5229_revision >= 0xC2) { /* @@ -297,9 +297,9 @@ pci_write_config_byte(dev, 0x53, tmpbyte); - __restore_flags(flags); + local_irq_restore(flags); - return(ata66); + return (ata66); } static void __init ali15x3_init_channel(struct ata_channel *hwif) @@ -374,22 +374,22 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_AL, - device: PCI_DEVICE_ID_AL_M5219, + .vendor = PCI_VENDOR_ID_AL, + .device = PCI_DEVICE_ID_AL_M5219, /* FIXME: Perhaps we should use the same init routines * 
as below here. */ - enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} }, - bootable: ON_BOARD, - flags: ATA_F_SIMPLEX + .enablebits = { {0x00,0x00,0x00}, {0x00,0x00,0x00} }, + .bootable = ON_BOARD, + .flags = ATA_F_SIMPLEX }, { - vendor: PCI_VENDOR_ID_AL, - device: PCI_DEVICE_ID_AL_M5229, - init_chipset: ali15x3_init_chipset, - init_channel: ali15x3_init_channel, - init_dma: ali15x3_init_dma, - enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} }, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_AL, + .device = PCI_DEVICE_ID_AL_M5229, + .init_chipset = ali15x3_init_chipset, + .init_channel = ali15x3_init_channel, + .init_dma = ali15x3_init_dma, + .enablebits = { {0x00,0x00,0x00}, {0x00,0x00,0x00} }, + .bootable = ON_BOARD } }; @@ -397,9 +397,8 @@ { int i; - for (i = 0; i < ARRAY_SIZE(chipsets); ++i) { + for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - } return 0; } diff -Nru a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c --- a/drivers/ide/amd74xx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/amd74xx.c Sat Jul 20 12:12:35 2002 @@ -303,59 +303,59 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_AMD, - device: PCI_DEVICE_ID_AMD_COBRA_7401, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_COBRA_7401, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_AMD, - device: PCI_DEVICE_ID_AMD_VIPER_7409, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, - bootable: ON_BOARD, - flags: ATA_F_SIMPLEX + .vendor = PCI_VENDOR_ID_AMD, + .device 
= PCI_DEVICE_ID_AMD_VIPER_7409, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, + .bootable = ON_BOARD, + .flags = ATA_F_SIMPLEX }, { - vendor: PCI_VENDOR_ID_AMD, - device: PCI_DEVICE_ID_AMD_VIPER_7411, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_VIPER_7411, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_AMD, - device: PCI_DEVICE_ID_AMD_OPUS_7441, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_OPUS_7441, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_AMD, - device: PCI_DEVICE_ID_AMD_8111_IDE, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_8111_IDE, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x40,0x01,0x01}, {0x40,0x02,0x02}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_NVIDIA, - device: PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, - init_chipset: amd74xx_init_chipset, - init_channel: amd74xx_init_channel, - init_dma: amd74xx_init_dma, - enablebits: 
{{0x50,0x01,0x01}, {0x50,0x02,0x02}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, + .init_chipset = amd74xx_init_chipset, + .init_channel = amd74xx_init_channel, + .init_dma = amd74xx_init_dma, + .enablebits = {{0x50,0x01,0x01}, {0x50,0x02,0x02}}, + .bootable = ON_BOARD }, }; @@ -363,9 +363,8 @@ { int i; - for (i = 0; i < ARRAY_SIZE(chipsets); ++i) { + for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - } return 0; } diff -Nru a/drivers/ide/atapi.c b/drivers/ide/atapi.c --- a/drivers/ide/atapi.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/atapi.c Sat Jul 20 12:12:35 2002 @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -115,6 +116,66 @@ outsw(IDE_DATA_REG, buf + (n & ~0x03), 1); } + +/* + * This function issues a special IDE device request onto the request queue. + * + * If action is ide_wait, then the rq is queued at the end of the request + * queue, and the function sleeps until it has been processed. This is for use + * when invoked from an ioctl handler. + * + * If action is ide_preempt, then the rq is queued at the head of the request + * queue, displacing the currently-being-processed request and this function + * returns immediately without waiting for the new rq to be completed. This is + * VERY DANGEROUS, and is intended for careful use by the ATAPI tape/cdrom + * driver code. + * + * If action is ide_end, then the rq is queued at the end of the request queue, + * and the function returns immediately without waiting for the new rq to be + * completed. This is again intended for careful use by the ATAPI tape/cdrom + * driver code. 
+ */ +int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action) +{ + unsigned long flags; + struct ata_channel *ch = drive->channel; + unsigned int major = ch->major; + request_queue_t *q = &drive->queue; + struct list_head *queue_head = &q->queue_head; + DECLARE_COMPLETION(wait); + +#ifdef CONFIG_BLK_DEV_PDC4030 + if (ch->chipset == ide_pdc4030 && rq->buffer) + return -ENOSYS; /* special drive cmds not supported */ +#endif + rq->errors = 0; + rq->rq_status = RQ_ACTIVE; + rq->rq_dev = mk_kdev(major, (drive->select.b.unit) << PARTN_BITS); + if (action == ide_wait) + rq->waiting = &wait; + + spin_lock_irqsave(ch->lock, flags); + + if (action == ide_preempt) + drive->rq = NULL; + else if (!blk_queue_empty(&drive->queue)) + queue_head = queue_head->prev; /* ide_end and ide_wait */ + + __elv_add_request(q, rq, queue_head); + + do_ide_request(q); + + spin_unlock_irqrestore(ch->lock, flags); + + if (action == ide_wait) { + wait_for_completion(&wait); /* wait for it to be serviced */ + return rq->errors ? 
-EIO : 0; /* return -EIO if errors */ + } + + return 0; +} + +EXPORT_SYMBOL(ide_do_drive_cmd); EXPORT_SYMBOL(atapi_discard_data); EXPORT_SYMBOL(atapi_write_zeros); EXPORT_SYMBOL(atapi_init_pc); diff -Nru a/drivers/ide/ataraid.c b/drivers/ide/ataraid.c --- a/drivers/ide/ataraid.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ataraid.c Sat Jul 20 12:12:35 2002 @@ -34,12 +34,14 @@ #include "ataraid.h" -static struct raid_device_operations* ataraid_ops[16]; +static struct raid_device_operations *ataraid_ops[16]; -static int ataraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -static int ataraid_open(struct inode * inode, struct file * filp); -static int ataraid_release(struct inode * inode, struct file * filp); -static void ataraid_split_request(request_queue_t *q, int rw, struct buffer_head * bh); +static int ataraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); +static int ataraid_open(struct inode *inode, struct file *filp); +static int ataraid_release(struct inode *inode, struct file *filp); +static void ataraid_split_request(request_queue_t * q, int rw, + struct buffer_head *bh); struct gendisk ataraid_gendisk; @@ -47,12 +49,12 @@ static int ataraid_readahead[256]; static struct block_device_operations ataraid_fops = { - owner: THIS_MODULE, - open: ataraid_open, - release: ataraid_release, - ioctl: ataraid_ioctl, + .owner = THIS_MODULE, + .open = ataraid_open, + .release = ataraid_release, + .ioctl = ataraid_ioctl, }; - + static DECLARE_MUTEX(ataraid_sem); @@ -63,48 +65,50 @@ /* stub fops functions */ -static int ataraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static int ataraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) { int minor; - minor = minor(inode->i_rdev)>>SHIFT; - - if ((ataraid_ops[minor])&&(ataraid_ops[minor]->ioctl)) - return (ataraid_ops[minor]->ioctl)(inode,file,cmd,arg); + minor = 
minor(inode->i_rdev) >> SHIFT; + + if ((ataraid_ops[minor]) && (ataraid_ops[minor]->ioctl)) + return (ataraid_ops[minor]->ioctl) (inode, file, cmd, arg); return -EINVAL; } -static int ataraid_open(struct inode * inode, struct file * filp) +static int ataraid_open(struct inode *inode, struct file *filp) { int minor; - minor = minor(inode->i_rdev)>>SHIFT; + minor = minor(inode->i_rdev) >> SHIFT; - if ((ataraid_ops[minor])&&(ataraid_ops[minor]->open)) - return (ataraid_ops[minor]->open)(inode,filp); + if ((ataraid_ops[minor]) && (ataraid_ops[minor]->open)) + return (ataraid_ops[minor]->open) (inode, filp); return -EINVAL; } -static int ataraid_release(struct inode * inode, struct file * filp) +static int ataraid_release(struct inode *inode, struct file *filp) { int minor; - minor = minor(inode->i_rdev)>>SHIFT; + minor = minor(inode->i_rdev) >> SHIFT; - if ((ataraid_ops[minor])&&(ataraid_ops[minor]->release)) - return (ataraid_ops[minor]->release)(inode,filp); + if ((ataraid_ops[minor]) && (ataraid_ops[minor]->release)) + return (ataraid_ops[minor]->release) (inode, filp); return -EINVAL; } -static int ataraid_make_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int ataraid_make_request(request_queue_t * q, int rw, + struct buffer_head *bh) { int minor; int retval; - minor = minor(bh->b_rdev)>>SHIFT; + minor = minor(bh->b_rdev) >> SHIFT; + + if ((ataraid_ops[minor]) && (ataraid_ops[minor]->make_request)) { - if ((ataraid_ops[minor])&&(ataraid_ops[minor]->make_request)) { - - retval= (ataraid_ops[minor]->make_request)(q,rw,bh); + retval = (ataraid_ops[minor]->make_request) (q, rw, bh); if (retval == -1) { - ataraid_split_request(q,rw,bh); + ataraid_split_request(q, rw, bh); return 0; } else return retval; @@ -116,7 +120,7 @@ { void *ptr = NULL; while (!ptr) { - ptr=kmalloc(sizeof(struct buffer_head),GFP_NOIO); + ptr = kmalloc(sizeof(struct buffer_head), GFP_NOIO); if (!ptr) yield(); } @@ -129,7 +133,7 @@ { void *ptr = NULL; while (!ptr) { - 
ptr=kmalloc(sizeof(struct ataraid_bh_private),GFP_NOIO); + ptr = kmalloc(sizeof(struct ataraid_bh_private), GFP_NOIO); if (!ptr) yield(); } @@ -142,11 +146,11 @@ { struct ataraid_bh_private *private = bh->b_private; - if (private==NULL) + if (private == NULL) BUG(); if (atomic_dec_and_test(&private->count)) { - private->parent->b_end_io(private->parent,uptodate); + private->parent->b_end_io(private->parent, uptodate); private->parent = NULL; kfree(private); } @@ -155,23 +159,24 @@ EXPORT_SYMBOL(ataraid_end_request); -static void ataraid_split_request(request_queue_t *q, int rw, struct buffer_head * bh) +static void ataraid_split_request(request_queue_t * q, int rw, + struct buffer_head *bh) { - struct buffer_head *bh1,*bh2; + struct buffer_head *bh1, *bh2; struct ataraid_bh_private *private; - bh1=ataraid_get_bhead(); - bh2=ataraid_get_bhead(); + bh1 = ataraid_get_bhead(); + bh2 = ataraid_get_bhead(); /* If either of those ever fails we're doomed */ - if ((!bh1)||(!bh2)) + if ((!bh1) || (!bh2)) BUG(); private = ataraid_get_private(); - if (private==NULL) + if (private == NULL) BUG(); - + memcpy(bh1, bh, sizeof(*bh)); memcpy(bh2, bh, sizeof(*bh)); - + bh1->b_end_io = ataraid_end_request; bh2->b_end_io = ataraid_end_request; @@ -182,12 +187,12 @@ bh1->b_private = private; bh2->b_private = private; - atomic_set(&private->count,2); + atomic_set(&private->count, 2); - bh2->b_data += bh->b_size/2; + bh2->b_data += bh->b_size / 2; - generic_make_request(rw,bh1); - generic_make_request(rw,bh2); + generic_make_request(rw, bh1); + generic_make_request(rw, bh2); } @@ -200,12 +205,12 @@ { int bit; down(&ataraid_sem); - if (ataraiduse==~0U) { + if (ataraiduse == ~0U) { up(&ataraid_sem); return -ENODEV; } - bit=ffz(ataraiduse); - ataraiduse |= 1<maskproc(drive); } +EXPORT_SYMBOL(ata_mask); + /* * Check the state of the status register. */ @@ -94,6 +96,39 @@ EXPORT_SYMBOL(ata_status); /* + * This is used to check for the drive status on the IRQ handling code path. 
+ */ +int ata_status_irq(struct ata_device *drive) +{ + if (test_bit(IDE_DMA, drive->channel->active)) + return udma_irq_status(drive); + + /* Need to guarantee 400ns since last command was issued? + */ +#ifdef CONFIG_IDEPCI_SHARE_IRQ + + /* + * We do a passive status test under shared PCI interrupts on cards + * that truly share the ATA side interrupt, but may also share an + * interrupt with another pci card/device. + */ + + if (drive->channel->io_ports[IDE_CONTROL_OFFSET]) + drive->status = IN_BYTE(drive->channel->io_ports[IDE_CONTROL_OFFSET]); + + else +#endif + ata_status(drive, 0, 0); /* Note: this may clear a pending IRQ! */ + + if (drive->status & BUSY_STAT) + return 0; /* drive busy: definitely not interrupting */ + else + return 1; /* drive ready: *might* be interrupting */ +} + +EXPORT_SYMBOL(ata_status_irq); + +/* * Busy-wait for the drive status to be not "busy". Check then the status for * all of the "good" bits and none of the "bad" bits, and if all is okay it * returns 0. All other cases return 1 after invoking error handler -- caller @@ -116,7 +151,7 @@ unsigned long flags; __save_flags(flags); - ide__sti(); + local_irq_enable(); timeout += jiffies; while (!ata_status(drive, 0, BUSY_STAT)) { if (time_after(jiffies, timeout)) { @@ -210,6 +245,8 @@ OUT_BYTE(rf->high_cylinder, ch->io_ports[IDE_HCYL_OFFSET]); } +EXPORT_SYMBOL(ata_out_regfile); + /* * Input a complete register file. 
*/ @@ -222,6 +259,5 @@ rf->low_cylinder = IN_BYTE(ch->io_ports[IDE_LCYL_OFFSET]); rf->high_cylinder = IN_BYTE(ch->io_ports[IDE_HCYL_OFFSET]); } - MODULE_LICENSE("GPL"); diff -Nru a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c --- a/drivers/ide/dtc2278.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/dtc2278.c Sat Jul 20 12:12:35 2002 @@ -95,8 +95,7 @@ { unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); /* * This enables the second interface */ @@ -112,7 +111,7 @@ sub22(1,0xc3); sub22(0,0xa0); #endif - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); ide_hwifs[0].serialized = 1; ide_hwifs[1].serialized = 1; diff -Nru a/drivers/ide/gayle.c b/drivers/ide/gayle.c --- a/drivers/ide/gayle.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/gayle.c Sat Jul 20 12:12:35 2002 @@ -106,9 +106,9 @@ return 1; } - /* - * Probe for a Gayle IDE interface (and optionally for an IDE doubler) - */ +/* + * Probe for a Gayle IDE interface (and optionally for an IDE doubler) + */ void __init gayle_init(void) { @@ -122,7 +122,7 @@ for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { ide_ioreg_t base, ctrlport, irqport; - ide_ack_intr_t *ack_intr; + int (*ack_intr)(struct ata_channel *); hw_regs_t hw; int index; unsigned long phys_base, res_start, res_n; diff -Nru a/drivers/ide/hd.c b/drivers/ide/hd.c --- a/drivers/ide/hd.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/hd.c Sat Jul 20 12:12:34 2002 @@ -132,16 +132,16 @@ unsigned long read_timer(void) { + extern spinlock_t i8253_lock; unsigned long t, flags; int i; - save_flags(flags); - cli(); + spin_lock_irqsave(&i8253_lock, flags); t = jiffies * 11932; outb_p(0, 0x43); i = inb_p(0x40); i |= inb(0x40) << 8; - restore_flags(flags); + spin_unlock_irqrestore(&i8253_lock, flags); return(t - i); } #endif @@ -693,12 +693,12 @@ extern struct block_device_operations hd_fops; static struct gendisk hd_gendisk = { - major: MAJOR_NR, - major_name: "hd", - minor_shift: 6, - 
part: hd, - sizes: hd_sizes, - fops: &hd_fops, + .major = MAJOR_NR, + .major_name = "hd", + .minor_shift = 6, + .part = hd, + .sizes = hd_sizes, + .fops = &hd_fops, }; static void hd_interrupt(int irq, void *dev_id, struct pt_regs *regs) @@ -714,9 +714,9 @@ } static struct block_device_operations hd_fops = { - open: hd_open, - release: hd_release, - ioctl: hd_ioctl, + .open = hd_open, + .release = hd_release, + .ioctl = hd_ioctl, }; /* @@ -817,8 +817,19 @@ NR_HD = 0; return; } - request_region(HD_DATA, 8, "hd"); - request_region(HD_CMD, 1, "hd(cmd)"); + if (!request_region(HD_DATA, 8, "hd")) { + printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA); + NR_HD = 0; + free_irq(HD_IRQ, NULL); + return; + } + if (!request_region(HD_CMD, 1, "hd(cmd)")) { + printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD); + NR_HD = 0; + free_irq(HD_IRQ, NULL); + release_region(HD_DATA, 8); + return; + } hd_gendisk.nr_real = NR_HD; diff -Nru a/drivers/ide/hpt34x.c b/drivers/ide/hpt34x.c --- a/drivers/ide/hpt34x.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/hpt34x.c Sat Jul 20 12:12:34 2002 @@ -135,8 +135,7 @@ unsigned short cmd; unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); pci_write_config_byte(dev, HPT34X_PCI_INIT_REG, 0x00); pci_read_config_word(dev, PCI_COMMAND, &cmd); @@ -167,7 +166,7 @@ pci_write_config_dword(dev, PCI_BASE_ADDRESS_3, dev->resource[3].start); pci_write_config_word(dev, PCI_COMMAND, cmd); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); return dev->irq; } @@ -202,13 +201,13 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_TTI, - device: PCI_DEVICE_ID_TTI_HPT343, - init_chipset: pci_init_hpt34x, - init_channel: ide_init_hpt34x, - bootable: NEVER_BOARD, - extra: 16, - flags: ATA_F_DMA + .vendor = PCI_VENDOR_ID_TTI, + .device = PCI_DEVICE_ID_TTI_HPT343, + .init_chipset = pci_init_hpt34x, + .init_channel = 
ide_init_hpt34x, + .bootable = NEVER_BOARD, + .extra = 16, + .flags = ATA_F_DMA }; int __init init_hpt34x(void) diff -Nru a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c --- a/drivers/ide/hpt366.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/hpt366.c Sat Jul 20 12:12:34 2002 @@ -1222,34 +1222,34 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_TTI, - device: PCI_DEVICE_ID_TTI_HPT366, - init_chipset: hpt366_init_chipset, - init_channel: hpt366_init_channel, - init_dma: hpt366_init_dma, - bootable: OFF_BOARD, - extra: 240, - flags: ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA + .vendor = PCI_VENDOR_ID_TTI, + .device = PCI_DEVICE_ID_TTI_HPT366, + .init_chipset = hpt366_init_chipset, + .init_channel = hpt366_init_channel, + .init_dma = hpt366_init_dma, + .bootable = OFF_BOARD, + .extra = 240, + .flags = ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_TTI, - device: PCI_DEVICE_ID_TTI_HPT372, - init_chipset: hpt366_init_chipset, - init_channel: hpt366_init_channel, - init_dma: hpt366_init_dma, - bootable: OFF_BOARD, - extra: 0, - flags: ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA + .vendor = PCI_VENDOR_ID_TTI, + .device = PCI_DEVICE_ID_TTI_HPT372, + .init_chipset = hpt366_init_chipset, + .init_channel = hpt366_init_channel, + .init_dma = hpt366_init_dma, + .bootable = OFF_BOARD, + .extra = 0, + .flags = ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_TTI, - device: PCI_DEVICE_ID_TTI_HPT374, - init_chipset: hpt366_init_chipset, - init_channel: hpt366_init_channel, - init_dma: hpt366_init_dma, - bootable: OFF_BOARD, - extra: 0, - flags: ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA + .vendor = PCI_VENDOR_ID_TTI, + .device = PCI_DEVICE_ID_TTI_HPT374, + .init_chipset = hpt366_init_chipset, + .init_channel = hpt366_init_channel, + .init_dma = hpt366_init_dma, + .bootable = OFF_BOARD, + .extra = 0, + .flags = ATA_F_IRQ | ATA_F_HPTHACK | ATA_F_DMA }, }; diff -Nru a/drivers/ide/hptraid.c 
b/drivers/ide/hptraid.c --- a/drivers/ide/hptraid.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/hptraid.c Sat Jul 20 12:12:35 2002 @@ -35,15 +35,17 @@ #include "ataraid.h" -static int hptraid_open(struct inode * inode, struct file * filp); -static int hptraid_release(struct inode * inode, struct file * filp); -static int hptraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -static int hptraid_make_request (request_queue_t *q, int rw, struct buffer_head * bh); +static int hptraid_open(struct inode *inode, struct file *filp); +static int hptraid_release(struct inode *inode, struct file *filp); +static int hptraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); +static int hptraid_make_request(request_queue_t * q, int rw, + struct buffer_head *bh); struct hptdisk { - kdev_t device; + kdev_t device; unsigned long sectors; struct block_device *bdev; }; @@ -53,79 +55,93 @@ unsigned int disks; unsigned long sectors; struct geom geom; - + struct hptdisk disk[8]; - + unsigned long cutoff[8]; - unsigned int cutoff_disks[8]; + unsigned int cutoff_disks[8]; }; static struct raid_device_operations hptraid_ops = { - open: hptraid_open, - release: hptraid_release, - ioctl: hptraid_ioctl, - make_request: hptraid_make_request + .open = hptraid_open, + .release = hptraid_release, + .ioctl = hptraid_ioctl, + .make_request = hptraid_make_request }; static struct hptraid raid[16]; -static int hptraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static int hptraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) { unsigned int minor; unsigned char val; unsigned long sectors; - + if (!inode || kdev_none(inode->i_rdev)) return -EINVAL; - minor = minor(inode->i_rdev)>>SHIFT; - + minor = minor(inode->i_rdev) >> SHIFT; + switch (cmd) { - case BLKGETSIZE: /* Return device size */ - if (!arg) return -EINVAL; - sectors = 
ataraid_gendisk.part[minor(inode->i_rdev)].nr_sects; - if (minor(inode->i_rdev)&15) - return put_user(sectors, (unsigned long *) arg); - return put_user(raid[minor].sectors , (unsigned long *) arg); - break; - + case BLKGETSIZE: /* Return device size */ + if (!arg) + return -EINVAL; + sectors = + ataraid_gendisk.part[minor(inode->i_rdev)].nr_sects; + if (minor(inode->i_rdev) & 15) + return put_user(sectors, (unsigned long *) arg); + return put_user(raid[minor].sectors, + (unsigned long *) arg); + break; + - case HDIO_GETGEO: + case HDIO_GETGEO: { - struct hd_geometry *loc = (struct hd_geometry *) arg; + struct hd_geometry *loc = + (struct hd_geometry *) arg; unsigned short bios_cyl; - - if (!loc) return -EINVAL; + + if (!loc) + return -EINVAL; val = 255; - if (put_user(val, (byte *) &loc->heads)) return -EFAULT; - val=63; - if (put_user(val, (byte *) &loc->sectors)) return -EFAULT; - bios_cyl = raid[minor].sectors/63/255; - if (put_user(bios_cyl, (unsigned short *) &loc->cylinders)) return -EFAULT; - if (put_user((unsigned)ataraid_gendisk.part[minor(inode->i_rdev)].start_sect, - (unsigned long *) &loc->start)) return -EFAULT; + if (put_user(val, (byte *) & loc->heads)) + return -EFAULT; + val = 63; + if (put_user(val, (byte *) & loc->sectors)) + return -EFAULT; + bios_cyl = raid[minor].sectors / 63 / 255; + if (put_user + (bios_cyl, (unsigned short *) &loc->cylinders)) + return -EFAULT; + if (put_user + ((unsigned) ataraid_gendisk. 
+ part[minor(inode->i_rdev)].start_sect, + (unsigned long *) &loc->start)) + return -EFAULT; return 0; } - case BLKROSET: - case BLKROGET: - case BLKSSZGET: - return blk_ioctl(inode->i_bdev, cmd, arg); + case BLKROSET: + case BLKROGET: + case BLKSSZGET: + return blk_ioctl(inode->i_bdev, cmd, arg); - default: - return -EINVAL; + default: + return -EINVAL; }; return 0; } -static int hptraid_make_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int hptraid_make_request(request_queue_t * q, int rw, + struct buffer_head *bh) { unsigned long rsect; - unsigned long rsect_left,rsect_accum = 0; + unsigned long rsect_left, rsect_accum = 0; unsigned long block; - unsigned int disk=0,real_disk=0; + unsigned int disk = 0, real_disk = 0; int i; int device; struct hptraid *thisraid; @@ -143,61 +159,70 @@ * a disk falls out of the "higher" count, we mark the max sector. So once we pass a cutoff * point, we have to divide by one less. */ - - device = (bh->b_rdev >> SHIFT)&MAJOR_MASK; + + device = (bh->b_rdev >> SHIFT) & MAJOR_MASK; thisraid = &raid[device]; - if (thisraid->stride==0) - thisraid->stride=1; + if (thisraid->stride == 0) + thisraid->stride = 1; /* Partitions need adding of the start sector of the partition to the requested sector */ - + rsect += ataraid_gendisk.part[minor(bh->b_rdev)].start_sect; /* Woops we need to split the request to avoid crossing a stride barrier */ - if ((rsect/thisraid->stride) != ((rsect+(bh->b_size/512)-1)/thisraid->stride)) { + if ((rsect / thisraid->stride) != + ((rsect + (bh->b_size / 512) - 1) / thisraid->stride)) { return -1; } - + rsect_left = rsect; - - for (i=0;i<8;i++) { - if (thisraid->cutoff_disks[i]==0) + + for (i = 0; i < 8; i++) { + if (thisraid->cutoff_disks[i] == 0) break; if (rsect > thisraid->cutoff[i]) { /* we're in the wrong area so far */ rsect_left -= thisraid->cutoff[i]; - rsect_accum += thisraid->cutoff[i]/thisraid->cutoff_disks[i]; + rsect_accum += + thisraid->cutoff[i] / + 
thisraid->cutoff_disks[i]; } else { block = rsect_left / thisraid->stride; disk = block % thisraid->cutoff_disks[i]; - block = (block / thisraid->cutoff_disks[i]) * thisraid->stride; - rsect = rsect_accum + (rsect_left % thisraid->stride) + block; + block = + (block / thisraid->cutoff_disks[i]) * + thisraid->stride; + rsect = + rsect_accum + (rsect_left % thisraid->stride) + + block; break; } } - - for (i=0;i<8;i++) { - if ((disk==0) && (thisraid->disk[i].sectors > rsect_accum)) { + + for (i = 0; i < 8; i++) { + if ((disk == 0) + && (thisraid->disk[i].sectors > rsect_accum)) { real_disk = i; break; } - if ((disk>0) && (thisraid->disk[i].sectors >= rsect_accum)) { + if ((disk > 0) + && (thisraid->disk[i].sectors >= rsect_accum)) { disk--; } - + } disk = real_disk; - + /* All but the first disk have a 10 sector offset */ - if (i>0) - rsect+=10; - - + if (i > 0) + rsect += 10; + + /* * The new BH_Lock semantics in ll_rw_blk.c guarantee that this * is the only IO operation happening on this bh. 
*/ - + bh->b_rdev = thisraid->disk[disk].device; bh->b_rsector = rsect; @@ -211,7 +236,7 @@ #include "hptraid.h" static int __init read_disk_sb(struct block_device *bdev, - struct highpoint_raid_conf *buf) + struct highpoint_raid_conf *buf) { /* Superblock is at 9*512 bytes */ Sector sect; @@ -226,22 +251,22 @@ return -1; } -static unsigned long maxsectors (int major,int minor) +static unsigned long maxsectors(int major, int minor) { unsigned long lba = 0; kdev_t dev; struct ata_device *ideinfo; - dev = mk_kdev(major,minor); - ideinfo = get_info_ptr (dev); - if (ideinfo==NULL) + dev = mk_kdev(major, minor); + ideinfo = get_info_ptr(dev); + if (ideinfo == NULL) return 0; /* first sector of the last cluster */ - if (ideinfo->head==0) + if (ideinfo->head == 0) return 0; - if (ideinfo->sect==0) + if (ideinfo->sect == 0) return 0; lba = (ideinfo->capacity); @@ -249,131 +274,136 @@ } static struct highpoint_raid_conf __initdata prom; -static void __init probedisk(int major, int minor,int device) +static void __init probedisk(int major, int minor, int device) { int i; - struct block_device *bdev = bdget(mk_kdev(major,minor)); + struct block_device *bdev = bdget(mk_kdev(major, minor)); struct gendisk *gd; if (!bdev) return; - if (blkdev_get(bdev,FMODE_READ|FMODE_WRITE,0,BDEV_RAW) < 0) + if (blkdev_get(bdev, FMODE_READ | FMODE_WRITE, 0, BDEV_RAW) < 0) return; - if (maxsectors(major,minor)==0) + if (maxsectors(major, minor) == 0) goto out; - if (read_disk_sb(bdev, &prom)) - goto out; + if (read_disk_sb(bdev, &prom)) + goto out; - if (prom.magic!= 0x5a7816f0) - goto out; - if (prom.type) { - printk(KERN_INFO "hptraid: only RAID0 is supported currently\n"); - goto out; - } + if (prom.magic != 0x5a7816f0) + goto out; + if (prom.type) { + printk(KERN_INFO + "hptraid: only RAID0 is supported currently\n"); + goto out; + } i = prom.disk_number; - if (i<0) + if (i < 0) goto out; - if (i>8) + if (i > 8) goto out; raid[device].disk[i].bdev = bdev; /* This is supposed to prevent 
others from stealing our underlying disks */ /* now blank the /proc/partitions table for the wrong partition table, so that scripts don't accidentally mount it and crash the kernel */ - /* XXX: the 0 is an utter hack --hch */ - gd=get_gendisk(mk_kdev(major, 0)); - if (gd!=NULL) { + /* XXX: the 0 is an utter hack --hch */ + gd = get_gendisk(mk_kdev(major, 0)); + if (gd != NULL) { int j; - for (j=1+(minor<minor_shift);j<((minor+1)<minor_shift);j++) - gd->part[j].nr_sects=0; + for (j = 1 + (minor << gd->minor_shift); + j < ((minor + 1) << gd->minor_shift); j++) + gd->part[j].nr_sects = 0; } - raid[device].disk[i].device = mk_kdev(major,minor); - raid[device].disk[i].sectors = maxsectors(major,minor); - raid[device].stride = (1<bar)) + for (j = 0; j < 8; j++) + if ((raid[device].disk[j].sectors < smallest) + && (raid[device].disk[j].sectors > bar)) smallest = raid[device].disk[j].sectors; count = 0; - for (j=0;j<8;j++) + for (j = 0; j < 8; j++) if (raid[device].disk[j].sectors >= smallest) count++; - - smallest = smallest * count; + + smallest = smallest * count; bar = smallest; raid[device].cutoff[i] = smallest; raid[device].cutoff_disks[i] = count; - + } } static __init int hptraid_init_one(int device) { - int i,count; + int i, count; - probedisk(IDE0_MAJOR, 0, device); + probedisk(IDE0_MAJOR, 0, device); probedisk(IDE0_MAJOR, 64, device); - probedisk(IDE1_MAJOR, 0, device); + probedisk(IDE1_MAJOR, 0, device); probedisk(IDE1_MAJOR, 64, device); - probedisk(IDE2_MAJOR, 0, device); + probedisk(IDE2_MAJOR, 0, device); probedisk(IDE2_MAJOR, 64, device); - probedisk(IDE3_MAJOR, 0, device); + probedisk(IDE3_MAJOR, 0, device); probedisk(IDE3_MAJOR, 64, device); - + fill_cutoff(device); - + /* Initialize the gendisk structure */ - - ataraid_register_disk(device,raid[device].sectors); - count=0; - printk(KERN_INFO "Highpoint HPT370 Softwareraid driver for linux version 0.01\n"); - - for (i=0;i<8;i++) { - if (raid[device].disk[i].device!=0) { + ataraid_register_disk(device, 
raid[device].sectors); + + count = 0; + printk(KERN_INFO + "Highpoint HPT370 Softwareraid driver for linux version 0.01\n"); + + for (i = 0; i < 8; i++) { + if (raid[device].disk[i].device != 0) { printk(KERN_INFO "Drive %i is %li Mb \n", - i,raid[device].disk[i].sectors/2048); + i, raid[device].disk[i].sectors / 2048); count++; } } if (count) { - printk(KERN_INFO "Raid array consists of %i drives. \n",count); + printk(KERN_INFO "Raid array consists of %i drives. \n", + count); return 0; } else { printk(KERN_INFO "No raid array found\n"); return -ENODEV; } - + } static __init int hptraid_init(void) { - int retval,device; - - device=ataraid_get_device(&hptraid_ops); - if (device<0) + int retval, device; + + device = ataraid_get_device(&hptraid_ops); + if (device < 0) return -ENODEV; retval = hptraid_init_one(device); if (retval) @@ -381,28 +411,29 @@ return retval; } -static void __exit hptraid_exit (void) +static void __exit hptraid_exit(void) { - int i,device; - for (device = 0; device<16; device++) { - for (i=0;i<8;i++) { - struct block_device *bdev = raid[device].disk[i].bdev; + int i, device; + for (device = 0; device < 16; device++) { + for (i = 0; i < 8; i++) { + struct block_device *bdev = + raid[device].disk[i].bdev; raid[device].disk[i].bdev = NULL; if (bdev) blkdev_put(bdev, BDEV_RAW); - } + } if (raid[device].sectors) ataraid_release_device(device); } } -static int hptraid_open(struct inode * inode, struct file * filp) +static int hptraid_open(struct inode *inode, struct file *filp) { MOD_INC_USE_COUNT; return 0; } -static int hptraid_release(struct inode * inode, struct file * filp) -{ +static int hptraid_release(struct inode *inode, struct file *filp) +{ MOD_DEC_USE_COUNT; return 0; } diff -Nru a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c --- a/drivers/ide/ht6560b.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ht6560b.c Sat Jul 20 12:12:35 2002 @@ -127,8 +127,7 @@ static u8 current_timing = 0; u8 select, timing; - __save_flags (flags); /* local CPU 
only */ - __cli(); /* local CPU only */ + local_irq_save(flags); select = HT_CONFIG(drive); timing = HT_TIMING(drive); @@ -152,7 +151,7 @@ printk("ht6560b: %s: select=%#x timing=%#x\n", drive->name, select, timing); #endif } - __restore_flags (flags); /* local CPU only */ + local_irq_restore(flags); } /* diff -Nru a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c --- a/drivers/ide/ide-cd.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-cd.c Sat Jul 20 12:12:35 2002 @@ -556,7 +556,7 @@ if ((rq->flags & REQ_CMD) && !rq->current_nr_sectors) uptodate = 1; - __ata_end_request(drive, rq, uptodate, 0); + ata_end_request(drive, rq, uptodate, 0); } @@ -912,7 +912,7 @@ if (dma) { if (!dma_error) { - __ata_end_request(drive, rq, 1, rq->nr_sectors); + ata_end_request(drive, rq, 1, rq->nr_sectors); return ATA_OP_FINISHED; } else @@ -1497,7 +1497,7 @@ if (dma_error) return ata_error(drive, rq, "dma error"); - __ata_end_request(drive, rq, 1, rq->nr_sectors); + ata_end_request(drive, rq, 1, rq->nr_sectors); return ATA_OP_FINISHED; } @@ -1936,7 +1936,7 @@ If we get an error for the regular case, we assume a CDI without additional audio tracks. In this case the readable TOC is empty (CDI tracks are not included) - and only holds the Leadout entry. Heiko Eißfeldt */ + and only holds the Leadout entry. Heiko EiÃ^ßfeldt */ ntracks = 0; stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0, (char *)&toc->hdr, @@ -2453,26 +2453,26 @@ * Device initialization. 
*/ static struct cdrom_device_ops ide_cdrom_dops = { - open: ide_cdrom_open_real, - release: ide_cdrom_release_real, - drive_status: ide_cdrom_drive_status, - media_changed: ide_cdrom_check_media_change_real, - tray_move: ide_cdrom_tray_move, - lock_door: ide_cdrom_lock_door, - select_speed: ide_cdrom_select_speed, - get_last_session: ide_cdrom_get_last_session, - get_mcn: ide_cdrom_get_mcn, - reset: ide_cdrom_reset, - audio_ioctl: ide_cdrom_audio_ioctl, - dev_ioctl: ide_cdrom_dev_ioctl, - capability: CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | + .open = ide_cdrom_open_real, + .release = ide_cdrom_release_real, + .drive_status = ide_cdrom_drive_status, + .media_changed = ide_cdrom_check_media_change_real, + .tray_move = ide_cdrom_tray_move, + .lock_door = ide_cdrom_lock_door, + .select_speed = ide_cdrom_select_speed, + .get_last_session = ide_cdrom_get_last_session, + .get_mcn = ide_cdrom_get_mcn, + .reset = ide_cdrom_reset, + .audio_ioctl = ide_cdrom_audio_ioctl, + .dev_ioctl = ide_cdrom_dev_ioctl, + .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM | CDC_GENERIC_PACKET, - generic_packet: ide_cdrom_packet, + .generic_packet = ide_cdrom_packet, }; static int ide_cdrom_register(struct ata_device *drive, int nslots) @@ -2840,11 +2840,11 @@ } /* Forwarding functions to generic routines. 
*/ -static int ide_cdrom_ioctl (struct ata_device *drive, +static int ide_cdrom_ioctl(struct ata_device *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { - return cdrom_ioctl (inode, file, cmd, arg); + return cdrom_ioctl(inode, file, cmd, arg); } static int ide_cdrom_open (struct inode *ip, struct file *fp, struct ata_device *drive) @@ -2932,18 +2932,18 @@ static void ide_cdrom_attach(struct ata_device *drive); static struct ata_operations ide_cdrom_driver = { - owner: THIS_MODULE, - attach: ide_cdrom_attach, - cleanup: ide_cdrom_cleanup, - standby: NULL, - do_request: ide_cdrom_do_request, - end_request: NULL, - ioctl: ide_cdrom_ioctl, - open: ide_cdrom_open, - release: ide_cdrom_release, - check_media_change: ide_cdrom_check_media_change, - revalidate: ide_cdrom_revalidate, - capacity: ide_cdrom_capacity, + .owner = THIS_MODULE, + .attach = ide_cdrom_attach, + .cleanup = ide_cdrom_cleanup, + .standby = NULL, + .do_request = ide_cdrom_do_request, + .end_request = NULL, + .ioctl = ide_cdrom_ioctl, + .open = ide_cdrom_open, + .release = ide_cdrom_release, + .check_media_change = ide_cdrom_check_media_change, + .revalidate = ide_cdrom_revalidate, + .capacity = ide_cdrom_capacity, }; /* options */ diff -Nru a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c --- a/drivers/ide/ide-disk.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/ide-disk.c Sat Jul 20 12:12:34 2002 @@ -140,7 +140,7 @@ --rq->current_nr_sectors; if (rq->current_nr_sectors <= 0) { - if (!__ata_end_request(drive, rq, 1, 0)) { + if (!ata_end_request(drive, rq, 1, 0)) { // printk("Request Ended stat: %02x\n", drive->status); return ATA_OP_FINISHED; @@ -166,7 +166,7 @@ if (!ata_status(drive, DRIVE_READY, drive->bad_wstat)) return ata_error(drive, rq, __FUNCTION__); - if (!rq->current_nr_sectors && !__ata_end_request(drive, rq, 1, 0)) { + if (!rq->current_nr_sectors && !ata_end_request(drive, rq, 1, 0)) { ret = ATA_OP_FINISHED; } else { if ((rq->nr_sectors == 1) != 
(drive->status & DRQ_STAT)) { @@ -235,7 +235,7 @@ /* FIXME: this seems buggy */ if (rq->current_nr_sectors <= 0) { - if (!__ata_end_request(drive, rq, 1, 0)) + if (!ata_end_request(drive, rq, 1, 0)) return ATA_OP_FINISHED; } msect -= nsect; @@ -269,7 +269,7 @@ return ata_error(drive, rq, __FUNCTION__); } if (!rq->nr_sectors) { - __ata_end_request(drive, rq, 1, rq->hard_nr_sectors); + ata_end_request(drive, rq, 1, rq->hard_nr_sectors); rq->bio = NULL; ret = ATA_OP_FINISHED; } else if (!ok) { @@ -349,7 +349,7 @@ /* FIXME: this check doesn't make sense */ if (!(rq->flags & REQ_CMD)) { blk_dump_rq_flags(rq, "idedisk_do_request - bad command"); - __ata_end_request(drive, rq, 0, 0); + ata_end_request(drive, rq, 0, 0); return ATA_OP_FINISHED; } @@ -514,8 +514,8 @@ printk("sectors=%ld, ", rq->nr_sectors); printk("buffer=%p\n", rq->buffer); #endif - ar->cmd = cmd; - rq->special = ar; + ar->cmd = cmd; + rq->special = ar; } /* (ks/hs): Moved to start, do not use for multiple out commands. @@ -548,10 +548,9 @@ return ATA_OP_CONTINUES; } - /* FIXME: Warning check for race between handler and prehandler - * for writing first block of data. however since we are well - * inside the boundaries of the seek, we should be okay. - * FIXME: should be fixed --bzolnier + /* FIXME: Warning check for race between handlers for writing + * first block of data. However since we are well inside the + * boundaries of the seek, we should be okay. */ if (ar->command_type == IDE_DRIVE_TASK_RAW_WRITE) { ide_startstop_t ret; @@ -596,13 +595,15 @@ * * FIXME: Replace hard-coded 100, what about * error handling? + * + * FIXME: Whatabout the IRE clearing and not clearing case?! */ for (i = 0; i < 100; ++i) { - if (drive_is_ready(drive)) + if (ata_status_irq(drive)) break; } - if (!drive_is_ready(drive)) { + if (!ata_status_irq(drive)) { /* We are compleatly missing an error * return path here. * FIXME: We have only one? 
-alat @@ -1290,7 +1291,9 @@ return ret; } -static int idedisk_ioctl(struct ata_device *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static int idedisk_ioctl(struct ata_device *drive, + struct inode *inode, struct file *__fp, + unsigned int cmd, unsigned long arg) { struct hd_driveid *id = drive->id; @@ -1440,18 +1443,18 @@ * Subdriver functions. */ static struct ata_operations idedisk_driver = { - owner: THIS_MODULE, - attach: idedisk_attach, - cleanup: idedisk_cleanup, - standby: idedisk_standby, - do_request: idedisk_do_request, - end_request: NULL, - ioctl: idedisk_ioctl, - open: idedisk_open, - release: idedisk_release, - check_media_change: idedisk_check_media_change, - revalidate: NULL, /* use default method */ - capacity: idedisk_capacity, + .owner = THIS_MODULE, + .attach = idedisk_attach, + .cleanup = idedisk_cleanup, + .standby = idedisk_standby, + .do_request = idedisk_do_request, + .end_request = NULL, + .ioctl = idedisk_ioctl, + .open = idedisk_open, + .release = idedisk_release, + .check_media_change = idedisk_check_media_change, + .revalidate = NULL, /* use default method */ + .capacity = idedisk_capacity, }; static void idedisk_attach(struct ata_device *drive) diff -Nru a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c --- a/drivers/ide/ide-floppy.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-floppy.c Sat Jul 20 12:12:35 2002 @@ -96,6 +96,7 @@ #include #include #include +#include #include #include @@ -367,7 +368,7 @@ return 0; if (!(rq->flags & REQ_SPECIAL)) { - __ata_end_request(drive, rq, uptodate, 0); + ata_end_request(drive, rq, uptodate, 0); return 0; } @@ -592,9 +593,9 @@ #if IDEFLOPPY_DEBUG_LOG printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); #endif /* IDEFLOPPY_DEBUG_LOG */ - clear_bit (PC_DMA_IN_PROGRESS, &pc->flags); + clear_bit(PC_DMA_IN_PROGRESS, &pc->flags); - ide__sti(); /* local CPU only */ + local_irq_enable(); if (status.b.check || 
test_bit(PC_DMA_ERROR, &pc->flags)) { /* Error detected */ #if IDEFLOPPY_DEBUG_LOG @@ -731,7 +732,6 @@ static ide_startstop_t idefloppy_transfer_pc1(struct ata_device *drive, struct request *rq) { idefloppy_floppy_t *floppy = drive->driver_data; - ide_startstop_t startstop; atapi_ireason_reg_t ireason; int ret; @@ -1345,18 +1345,18 @@ progress_indication=floppy->progress_indication; } /* Else assume format_unit has finished, and we're - ** at 0x10000 */ + * at 0x10000 + */ } else { atapi_status_reg_t status; unsigned long flags; - __save_flags(flags); - __cli(); + local_irq_save(flags); ata_status(drive, 0, 0); status.all = drive->status; - __restore_flags(flags); + local_irq_restore(flags); progress_indication= !status.b.dsc ? 0:0x10000; } @@ -1735,18 +1735,18 @@ * IDE subdriver functions, registered with ide.c */ static struct ata_operations idefloppy_driver = { - owner: THIS_MODULE, - attach: idefloppy_attach, - cleanup: idefloppy_cleanup, - standby: NULL, - do_request: idefloppy_do_request, - end_request: idefloppy_end_request, - ioctl: idefloppy_ioctl, - open: idefloppy_open, - release: idefloppy_release, - check_media_change: idefloppy_check_media_change, - revalidate: NULL, /* use default method */ - capacity: idefloppy_capacity, + .owner = THIS_MODULE, + .attach = idefloppy_attach, + .cleanup = idefloppy_cleanup, + .standby = NULL, + .do_request = idefloppy_do_request, + .end_request = idefloppy_end_request, + .ioctl = idefloppy_ioctl, + .open = idefloppy_open, + .release = idefloppy_release, + .check_media_change = idefloppy_check_media_change, + .revalidate = NULL, /* use default method */ + .capacity = idefloppy_capacity, }; static void idefloppy_attach(struct ata_device *drive) diff -Nru a/drivers/ide/ide-m8xx.c b/drivers/ide/ide-m8xx.c --- a/drivers/ide/ide-m8xx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-m8xx.c Sat Jul 20 12:12:35 2002 @@ -50,7 +50,7 @@ static void print_funcid (int func); static int check_ide_device (unsigned long base); -static 
void ide_interrupt_ack (void *dev); +static int ide_interrupt_ack(struct ata_channel *); static void m8xx_ide_tuneproc(struct ata_device *drive, byte pio); typedef struct ide_ioport_desc { @@ -326,7 +326,7 @@ /* register routine to tune PIO mode */ ide_hwifs[data_port].tuneproc = m8xx_ide_tuneproc; - hw->ack_intr = (ide_ack_intr_t *) ide_interrupt_ack; + hw->ack_intr = ide_interrupt_ack; /* Enable Harddisk Interrupt, * and make it edge sensitive */ @@ -401,7 +401,7 @@ ioport_dsc[data_port].reg_off[i], i, base + ioport_dsc[data_port].reg_off[i]); #endif - *p++ = base + ioport_dsc[data_port].reg_off[i]; + *p++ = base + ioport_dsc[data_port].reg_off[i]; } if (irq) { @@ -412,16 +412,16 @@ /* register routine to tune PIO mode */ ide_hwifs[data_port].tuneproc = m8xx_ide_tuneproc; - hw->ack_intr = (ide_ack_intr_t *) ide_interrupt_ack; + hw->ack_intr = ide_interrupt_ack; /* Enable Harddisk Interrupt, * and make it edge sensitive */ /* (11-18) Set edge detect for irq, no wakeup from low power mode */ ((immap_t *) IMAP_ADDR)->im_siu_conf.sc_siel |= (0x80000000 >> ioport_dsc[data_port].irq); -} /* m8xx_ide_init_hwif_ports() for CONFIG_IDE_8xx_DIRECT */ +} /* m8xx_ide_init_hwif_ports() for CONFIG_IDE_8xx_DIRECT */ -#endif /* CONFIG_IDE_8xx_DIRECT */ +#endif /* -------------------------------------------------------------------- */ @@ -493,11 +493,10 @@ printk("%s[%d] %s: not implemented yet!\n", __FILE__,__LINE__,__FUNCTION__); -#endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */ +#endif } -static void -ide_interrupt_ack (void *dev) +static int ide_interrupt_ack(struct ata_channel *ch) { #ifdef CONFIG_IDE_8xx_PCCARD u_int pscr, pipr; @@ -529,17 +528,17 @@ /* clear the interrupt sources */ ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr = pscr; -#else /* ! 
CONFIG_IDE_8xx_PCCARD */ +#else /* * Only CONFIG_IDE_8xx_PCCARD is using the interrupt of the * MPC8xx's PCMCIA controller, so there is nothing to be done here * for CONFIG_IDE_8xx_DIRECT and CONFIG_IDE_EXT_DIRECT. * The interrupt is handled somewhere else. -- Steven */ -#endif /* CONFIG_IDE_8xx_PCCARD */ -} - +#endif + return 0; +} /* * CIS Tupel codes @@ -655,7 +654,7 @@ q+= 2; } } -#endif /* DEBUG_PCMCIA */ +#endif switch (code) { case CISTPL_VERS_1: ident = p + 4; diff -Nru a/drivers/ide/ide-pci.c b/drivers/ide/ide-pci.c --- a/drivers/ide/ide-pci.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-pci.c Sat Jul 20 12:12:35 2002 @@ -158,6 +158,80 @@ return 0; } +#ifdef CONFIG_BLK_DEV_IDEDMA +/* + * Setup DMA transfers on the channel. + */ +static void __init setup_channel_dma(struct pci_dev *dev, + struct ata_pci_device* d, + int autodma, + struct ata_channel *ch) +{ + unsigned long dma_base; + + if (d->flags & ATA_F_NOADMA) + autodma = 0; + + if (autodma) + ch->autodma = 1; + + if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80)))) + return; + + /* + * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space: + */ + dma_base = pci_resource_start(dev, 4); + if (dma_base) { + /* PDC20246, PDC20262, HPT343, & HPT366 */ + if ((ch->unit == ATA_PRIMARY) && d->extra) { + request_region(dma_base + 16, d->extra, dev->name); + ch->dma_extra = d->extra; + } + + /* If we are on the second channel, the dma base address will + * be one entry away from the primary interface. + */ + if (ch->unit == ATA_SECONDARY) + dma_base += 8; + + if (d->flags & ATA_F_SIMPLEX) { + outb(inb(dma_base + 2) & 0x60, dma_base + 2); + if (inb(dma_base + 2) & 0x80) + printk(KERN_INFO "%s: simplex device: DMA forced\n", dev->name); + } else { + /* If the device claims "simplex" DMA, this means only + * one of the two interfaces can be trusted with DMA at + * any point in time. So we should enable DMA only on + * one of the two interfaces. 
+ */ + if ((inb(dma_base + 2) & 0x80)) { + if ((!ch->drives[0].present && !ch->drives[1].present) || + ch->unit == ATA_SECONDARY) { + printk(KERN_INFO "%s: simplex device: DMA disabled\n", dev->name); + dma_base = 0; + } + } + } + } else { + printk(KERN_INFO "%s: %s Bus-Master DMA was disabled by BIOS\n", + ch->name, dev->name); + + return; + } + + /* The function below will check itself whatever there is something to + * be done or not. We don't have therefore to care whatever it was + * already enabled by the primary channel run. + */ + pci_set_master(dev); + if (d->init_dma) + d->init_dma(ch, dma_base); + else + ata_init_dma(ch, dma_base); +} +#endif + /* * Setup a particular port on an ATA host controller. * @@ -171,7 +245,6 @@ int autodma) { unsigned long base = 0; - unsigned long dma_base; unsigned long ctl = 0; ide_pci_enablebit_t *e = &(d->enablebits[port]); struct ata_channel *ch; @@ -260,69 +333,13 @@ if (ch->udma_four) printk("%s: warning: ATA-66/100 forced bit set!\n", dev->name); + #ifdef CONFIG_BLK_DEV_IDEDMA /* * Setup DMA transfers on the channel. */ - if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80)))) - goto no_dma; - /* - * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space: - */ - dma_base = pci_resource_start(dev, 4); - if (dma_base) { - /* PDC20246, PDC20262, HPT343, & HPT366 */ - if ((ch->unit == ATA_PRIMARY) && d->extra) { - request_region(dma_base + 16, d->extra, dev->name); - ch->dma_extra = d->extra; - } - - /* If we are on the second channel, the dma base address will - * be one entry away from the primary interface. 
- */ - if (ch->unit == ATA_SECONDARY) - dma_base += 8; - - if (d->flags & ATA_F_SIMPLEX) { - outb(inb(dma_base + 2) & 0x60, dma_base + 2); - if (inb(dma_base + 2) & 0x80) - printk(KERN_INFO "%s: simplex device: DMA forced\n", dev->name); - } else { - /* If the device claims "simplex" DMA, this means only - * one of the two interfaces can be trusted with DMA at - * any point in time. So we should enable DMA only on - * one of the two interfaces. - */ - if ((inb(dma_base + 2) & 0x80)) { - if ((!ch->drives[0].present && !ch->drives[1].present) || - ch->unit == ATA_SECONDARY) { - printk(KERN_INFO "%s: simplex device: DMA disabled\n", dev->name); - dma_base = 0; - } - } - } - } else { - printk(KERN_INFO "%s: %s Bus-Master DMA was disabled by BIOS\n", - ch->name, dev->name); - - goto no_dma; - } - - /* The function below will check itself whatever there is something to - * be done or not. We don't have therefore to care whatever it was - * already enabled by the primary channel run. - */ - pci_set_master(dev); - - if (autodma) - ch->autodma = 1; - - if (d->init_dma) - d->init_dma(ch, dma_base); - else - ata_init_dma(ch, dma_base); + setup_channel_dma(dev, d, autodma, ch); #endif - no_dma: /* Call chipset-specific routine for each enabled channel. 
*/ if (d->init_channel) @@ -680,75 +697,75 @@ */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_PCTECH, - device: PCI_DEVICE_ID_PCTECH_SAMURAI_IDE, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_PCTECH, + .device = PCI_DEVICE_ID_PCTECH_SAMURAI_IDE, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_CMD, - device: PCI_DEVICE_ID_CMD_640, - init_channel: ATA_PCI_IGNORE, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_CMD, + .device = PCI_DEVICE_ID_CMD_640, + .init_channel = ATA_PCI_IGNORE, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_NS, - device: PCI_DEVICE_ID_NS_87410, - enablebits: {{0x43,0x08,0x08}, {0x47,0x08,0x08}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_NS, + .device = PCI_DEVICE_ID_NS_87410, + .enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_HINT, - device: PCI_DEVICE_ID_HINT_VXPROII_IDE, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_HINT, + .device = PCI_DEVICE_ID_HINT_VXPROII_IDE, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_HOLTEK, - device: PCI_DEVICE_ID_HOLTEK_6565, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_HOLTEK, + .device = PCI_DEVICE_ID_HOLTEK_6565, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82371MX, - enablebits: {{0x6D,0x80,0x80}, {0x00,0x00,0x00}}, - bootable: ON_BOARD, - flags: ATA_F_NODMA + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82371MX, + .enablebits = {{0x6D,0x80,0x80}, {0x00,0x00,0x00}}, + .bootable = ON_BOARD, + .flags = ATA_F_NODMA }, { - vendor: PCI_VENDOR_ID_UMC, - device: PCI_DEVICE_ID_UMC_UM8673F, - bootable: ON_BOARD, - flags: ATA_F_FIXIRQ + .vendor = PCI_VENDOR_ID_UMC, + .device = PCI_DEVICE_ID_UMC_UM8673F, + .bootable = ON_BOARD, + .flags = ATA_F_FIXIRQ }, { - vendor: PCI_VENDOR_ID_UMC, - device: PCI_DEVICE_ID_UMC_UM8886A, - bootable: ON_BOARD, - flags: ATA_F_FIXIRQ + .vendor = PCI_VENDOR_ID_UMC, + .device = PCI_DEVICE_ID_UMC_UM8886A, + .bootable = 
ON_BOARD, + .flags = ATA_F_FIXIRQ }, { - vendor: PCI_VENDOR_ID_UMC, - device: PCI_DEVICE_ID_UMC_UM8886BF, - bootable: ON_BOARD, - flags: ATA_F_FIXIRQ + .vendor = PCI_VENDOR_ID_UMC, + .device = PCI_DEVICE_ID_UMC_UM8886BF, + .bootable = ON_BOARD, + .flags = ATA_F_FIXIRQ }, { - vendor: PCI_VENDOR_ID_VIA, - device: PCI_DEVICE_ID_VIA_82C561, - bootable: ON_BOARD, - flags: ATA_F_NOADMA + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_82C561, + .bootable = ON_BOARD, + .flags = ATA_F_NOADMA }, { - vendor: PCI_VENDOR_ID_VIA, - device: PCI_DEVICE_ID_VIA_82C586_1, - bootable: ON_BOARD, - flags: ATA_F_NOADMA + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_82C586_1, + .bootable = ON_BOARD, + .flags = ATA_F_NOADMA }, { - vendor: PCI_VENDOR_ID_TTI, - device: PCI_DEVICE_ID_TTI_HPT366, - bootable: OFF_BOARD, - extra: 240, - flags: ATA_F_IRQ | ATA_F_HPTHACK + .vendor = PCI_VENDOR_ID_TTI, + .device = PCI_DEVICE_ID_TTI_HPT366, + .bootable = OFF_BOARD, + .extra = 240, + .flags = ATA_F_IRQ | ATA_F_HPTHACK } }; diff -Nru a/drivers/ide/ide-pmac.c b/drivers/ide/ide-pmac.c --- a/drivers/ide/ide-pmac.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-pmac.c Sat Jul 20 12:12:35 2002 @@ -419,10 +419,10 @@ OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG); OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG); udelay(1); - __save_flags(flags); /* local CPU only */ - ide__sti(); /* local CPU only -- for jiffies */ + __save_flags(flags); + local_irq_enable(); result = wait_for_ready(drive); - __restore_flags(flags); /* local CPU only */ + __restore_flags(flags); ata_irq_enable(drive, 1); if (result) printk(KERN_ERR "pmac_ide_do_setfeature disk not ready after SET_FEATURE !\n"); diff -Nru a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c --- a/drivers/ide/ide-tape.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/ide-tape.c Sat Jul 20 12:12:35 2002 @@ -421,6 +421,7 @@ #include #include #include +#include #include #include @@ -1880,9 +1881,9 @@ if (tape->debug_level >= 2) printk (KERN_INFO 
"ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred); #endif - clear_bit (PC_DMA_IN_PROGRESS, &pc->flags); + clear_bit(PC_DMA_IN_PROGRESS, &pc->flags); - ide__sti(); /* local CPU only */ + local_irq_enable(); #if SIMULATE_ERRORS if ((pc->c[0] == IDETAPE_WRITE_CMD || pc->c[0] == IDETAPE_READ_CMD) && (++error_sim_count % 100) == 0) { @@ -2445,7 +2446,7 @@ * We do not support buffer cache originated requests. */ printk (KERN_NOTICE "ide-tape: %s: Unsupported command in request queue (%ld)\n", drive->name, rq->flags); - __ata_end_request(drive, rq, 0, 0); /* Let the common code handle it */ + ata_end_request(drive, rq, 0, 0); /* Let the common code handle it */ return ATA_OP_FINISHED; } @@ -5925,17 +5926,17 @@ static void idetape_attach(struct ata_device *); static struct ata_operations idetape_driver = { - owner: THIS_MODULE, - attach: idetape_attach, - cleanup: idetape_cleanup, - standby: NULL, - do_request: idetape_do_request, - end_request: idetape_end_request, - ioctl: idetape_blkdev_ioctl, - open: idetape_blkdev_open, - release: idetape_blkdev_release, - check_media_change: NULL, - revalidate: idetape_revalidate, + .owner = THIS_MODULE, + .attach = idetape_attach, + .cleanup = idetape_cleanup, + .standby = NULL, + .do_request = idetape_do_request, + .end_request = idetape_end_request, + .ioctl = idetape_blkdev_ioctl, + .open = idetape_blkdev_open, + .release = idetape_blkdev_release, + .check_media_change = NULL, + .revalidate = idetape_revalidate, }; @@ -5944,12 +5945,12 @@ * Our character device supporting functions, passed to register_chrdev. 
*/ static struct file_operations idetape_fops = { - owner: THIS_MODULE, - read: idetape_chrdev_read, - write: idetape_chrdev_write, - ioctl: idetape_chrdev_ioctl, - open: idetape_chrdev_open, - release: idetape_chrdev_release, + .owner = THIS_MODULE, + .read = idetape_chrdev_read, + .write = idetape_chrdev_write, + .ioctl = idetape_chrdev_ioctl, + .open = idetape_chrdev_open, + .release = idetape_chrdev_release, }; static void idetape_attach(struct ata_device *drive) diff -Nru a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c --- a/drivers/ide/ide-taskfile.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide-taskfile.c Sat Jul 20 12:12:35 2002 @@ -33,18 +33,9 @@ #include #include -#define DEBUG_TASKFILE 0 /* unset when fixed */ - -#if DEBUG_TASKFILE -#define DTF(x...) printk(##x) -#else -#define DTF(x...) -#endif - /* * Data transfer functions for polled IO. */ - static void ata_read_32(struct ata_device *drive, void *buffer, unsigned int wcount) { insl(IDE_DATA_REG, buffer, wcount); @@ -144,108 +135,15 @@ } /* - * Needed for PCI irq sharing - */ -int drive_is_ready(struct ata_device *drive) -{ - if (test_bit(IDE_DMA, drive->channel->active)) - return udma_irq_status(drive); - - /* - * Need to guarantee 400ns since last command was issued? - */ - - /* FIXME: promote this to the general status read method perhaps. - */ -#ifdef CONFIG_IDEPCI_SHARE_IRQ - /* - * We do a passive status test under shared PCI interrupts on - * cards that truly share the ATA side interrupt, but may also share - * an interrupt with another pci card/device. We make no assumptions - * about possible isa-pnp and pci-pnp issues yet. - */ - if (drive->channel->io_ports[IDE_CONTROL_OFFSET]) - drive->status = GET_ALTSTAT(); - else -#endif - ata_status(drive, 0, 0); /* Note: this may clear a pending IRQ! 
*/ - - if (drive->status & BUSY_STAT) - return 0; /* drive busy: definitely not interrupting */ - - return 1; /* drive ready: *might* be interrupting */ -} - -/* - * This function issues a special IDE device request onto the request queue. - * - * If action is ide_wait, then the rq is queued at the end of the request - * queue, and the function sleeps until it has been processed. This is for use - * when invoked from an ioctl handler. - * - * If action is ide_preempt, then the rq is queued at the head of the request - * queue, displacing the currently-being-processed request and this function - * returns immediately without waiting for the new rq to be completed. This is - * VERY DANGEROUS, and is intended for careful use by the ATAPI tape/cdrom - * driver code. - * - * If action is ide_end, then the rq is queued at the end of the request queue, - * and the function returns immediately without waiting for the new rq to be - * completed. This is again intended for careful use by the ATAPI tape/cdrom - * driver code. 
- */ -int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action) -{ - unsigned long flags; - struct ata_channel *ch = drive->channel; - unsigned int major = ch->major; - request_queue_t *q = &drive->queue; - struct list_head *queue_head = &q->queue_head; - DECLARE_COMPLETION(wait); - -#ifdef CONFIG_BLK_DEV_PDC4030 - if (ch->chipset == ide_pdc4030 && rq->buffer) - return -ENOSYS; /* special drive cmds not supported */ -#endif - rq->errors = 0; - rq->rq_status = RQ_ACTIVE; - rq->rq_dev = mk_kdev(major,(drive->select.b.unit)<waiting = &wait; - - spin_lock_irqsave(ch->lock, flags); - - if (action == ide_preempt) - drive->rq = NULL; - else if (!blk_queue_empty(&drive->queue)) - queue_head = queue_head->prev; /* ide_end and ide_wait */ - - __elv_add_request(q, rq, queue_head); - - do_ide_request(q); - - spin_unlock_irqrestore(ch->lock, flags); - - if (action == ide_wait) { - wait_for_completion(&wait); /* wait for it to be serviced */ - return rq->errors ? -EIO : 0; /* return -EIO if errors */ - } - - return 0; -} - - -/* * Invoked on completion of a special REQ_SPECIAL command. 
*/ -static ide_startstop_t special_intr(struct ata_device *drive, struct - request *rq) { +static ide_startstop_t special_intr(struct ata_device *drive, struct request *rq) { unsigned long flags; struct ata_channel *ch =drive->channel; struct ata_taskfile *ar = rq->special; ide_startstop_t ret = ATA_OP_FINISHED; - ide__sti(); + local_irq_enable(); if (rq->buffer && ar->taskfile.sector_number) { if (!ata_status(drive, 0, DRQ_STAT) && ar->taskfile.sector_number) { @@ -290,21 +188,52 @@ int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar, char *buf) { - struct request req; + struct request *rq; + unsigned long flags; + struct ata_channel *ch = drive->channel; + request_queue_t *q = &drive->queue; + struct list_head *queue_head = &q->queue_head; + DECLARE_COMPLETION(wait); + +#ifdef CONFIG_BLK_DEV_PDC4030 + if (ch->chipset == ide_pdc4030 && buf) + return -ENOSYS; /* special drive cmds not supported */ +#endif + + rq = __blk_get_request(&drive->queue, READ); + if (!rq) + rq = __blk_get_request(&drive->queue, WRITE); + + /* + * FIXME: Make sure there is a free slot on the list! + */ + + BUG_ON(!rq); + + rq->flags = REQ_SPECIAL; + rq->buffer = buf; + rq->special = ar; + rq->errors = 0; + rq->rq_status = RQ_ACTIVE; + rq->waiting = &wait; - ar->command_type = IDE_DRIVE_TASK_NO_DATA; ar->XXX_handler = special_intr; + ar->command_type = IDE_DRIVE_TASK_NO_DATA; + + spin_lock_irqsave(ch->lock, flags); + + if (!blk_queue_empty(&drive->queue)) + queue_head = queue_head->prev; + __elv_add_request(q, rq, queue_head); + + q->request_fn(q); + spin_unlock_irqrestore(ch->lock, flags); - memset(&req, 0, sizeof(req)); - req.flags = REQ_SPECIAL; - req.buffer = buf; - req.special = ar; + wait_for_completion(&wait); /* wait for it to be serviced */ - return ide_do_drive_cmd(drive, &req, ide_wait); + return rq->errors ? 
-EIO : 0; /* return -EIO if errors */ } -EXPORT_SYMBOL(drive_is_ready); -EXPORT_SYMBOL(ide_do_drive_cmd); EXPORT_SYMBOL(ata_read); EXPORT_SYMBOL(ata_write); EXPORT_SYMBOL(ide_raw_taskfile); diff -Nru a/drivers/ide/ide.c b/drivers/ide/ide.c --- a/drivers/ide/ide.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ide.c Sat Jul 20 12:12:35 2002 @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -106,7 +105,7 @@ return 0; } -int __ata_end_request(struct ata_device *drive, struct request *rq, int uptodate, unsigned int nr_secs) +int ata_end_request(struct ata_device *drive, struct request *rq, int uptodate, unsigned int nr_secs) { unsigned long flags; struct ata_channel *ch = drive->channel; @@ -132,7 +131,7 @@ } if (!end_that_request_first(rq, uptodate, nr_secs)) { - add_blkdev_randomness(major(rq->rq_dev)); + add_blkdev_randomness(ch->major); if (!blk_rq_tagged(rq)) blkdev_dequeue_request(rq); else @@ -192,12 +191,13 @@ if (mode > XFER_UDMA_0) mode--; else + /* - * OOPS we do not goto non Ultra DMA modes - * without iCRC's available we force - * the system to PIO and make the user - * invoke the ATA-1 ATA-2 DMA modes. + * We do not do non Ultra DMA modes. Without iCRC's + * available, we force the system to PIO and make the + * user select the ATA-1 ATA-2 DMA modes himself. */ + mode = XFER_PIO_4; drive->channel->speedproc(drive, mode); @@ -220,157 +220,9 @@ if (ata_ops(drive) && ata_ops(drive)->capacity) return ata_ops(drive)->capacity(drive); - /* This used to be 0x7fffffff, but since now we use the maximal drive - * capacity value used by other kernel subsystems as well. - */ - return ~0UL; } -extern struct block_device_operations ide_fops[]; - -static ide_startstop_t do_reset1(struct ata_device *, int); /* needed below */ - -/* - * Poll the interface for completion every 50ms during an ATAPI drive reset - * operation. 
If the drive has not yet responded, and we have not yet hit our - * maximum waiting time, then the timer is restarted for another 50ms. - */ -static ide_startstop_t atapi_reset_pollfunc(struct ata_device *drive, struct request *__rq) -{ - struct ata_channel *ch = drive->channel; - int ret = ATA_OP_FINISHED; - - ata_select(drive, 10); - if (!ata_status(drive, 0, BUSY_STAT)) { - if (time_before(jiffies, ch->poll_timeout)) { - ata_set_handler(drive, atapi_reset_pollfunc, HZ/20, NULL); - ret = ATA_OP_CONTINUES; /* continue polling */ - } else { - ch->poll_timeout = 0; /* end of polling */ - printk("%s: ATAPI reset timed out, status=0x%02x\n", drive->name, drive->status); - - ret = do_reset1(drive, 0); /* do it the old fashioned way */ - } - } else { - printk("%s: ATAPI reset complete\n", drive->name); - ch->poll_timeout = 0; /* done polling */ - - ret = ATA_OP_FINISHED; - } - - return ret; -} - -/* - * Poll the interface for completion every 50ms during an ata reset operation. - * If the drives have not yet responded, and we have not yet hit our maximum - * waiting time, then the timer is restarted for another 50ms. 
- */ -static ide_startstop_t reset_pollfunc(struct ata_device *drive, struct request *__rq) -{ - struct ata_channel *ch = drive->channel; - int ret; - - if (!ata_status(drive, 0, BUSY_STAT)) { - if (time_before(jiffies, ch->poll_timeout)) { - ata_set_handler(drive, reset_pollfunc, HZ/20, NULL); - ret = ATA_OP_CONTINUES; /* continue polling */ - } else { - ch->poll_timeout = 0; /* done polling */ - printk("%s: reset timed out, status=0x%02x\n", ch->name, drive->status); - ++drive->failures; - ret = ATA_OP_FINISHED; - } - } else { - u8 stat; - - ch->poll_timeout = 0; /* done polling */ - printk("%s: reset: ", ch->name); - if ((stat = GET_ERR()) == 1) { - printk("success\n"); - drive->failures = 0; - } else { - const char *msg = ""; - -#if FANCY_STATUS_DUMPS - u8 val; - static const char *messages[5] = { - " passed", - " formatter device", - " sector buffer", - " ECC circuitry", - " controlling MPU error" - }; - - printk("master:"); - val = stat & 0x7f; - if (val >= 1 && val <= 5) - msg = messages[val -1]; - if (stat & 0x80) - printk("; slave:"); -#endif - printk(KERN_ERR "%s error [%02x]\n", msg, stat); - ++drive->failures; - } - - ret = ATA_OP_FINISHED; - } - - return ret; -} - -/* - * Attempt to recover a confused drive by resetting it. Unfortunately, - * resetting a disk drive actually resets all devices on the same interface, so - * it can really be thought of as resetting the interface rather than resetting - * the drive. - * - * ATAPI devices have their own reset mechanism which allows them to be - * individually reset without clobbering other devices on the same interface. - * - * Unfortunately, the IDE interface does not generate an interrupt to let us - * know when the reset operation has finished, so we must poll for this. - * Equally poor, though, is the fact that this may a very long time to - * complete, (up to 30 seconds worst case). So, instead of busy-waiting here - * for it, we set a timer to poll at 50ms intervals. 
- */ -static ide_startstop_t do_reset1(struct ata_device *drive, int try_atapi) -{ - unsigned int unit; - unsigned long flags; - struct ata_channel *ch = drive->channel; - - /* FIXME: --bzolnier */ - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ - - /* For an ATAPI device, first try an ATAPI SRST. */ - if (try_atapi) { - if (drive->type != ATA_DISK) { - check_crc_errors(drive); - ata_select(drive, 20); - OUT_BYTE(WIN_SRST, IDE_COMMAND_REG); - ch->poll_timeout = jiffies + WAIT_WORSTCASE; - ata_set_handler(drive, atapi_reset_pollfunc, HZ/20, NULL); - __restore_flags(flags); /* local CPU only */ - - return ATA_OP_CONTINUES; - } - } - - /* - * First, reset any device state data we were maintaining - * for any of the drives on this interface. - */ - for (unit = 0; unit < MAX_DRIVES; ++unit) - check_crc_errors(&ch->drives[unit]); - - __restore_flags(flags); /* local CPU only */ - - return ATA_OP_CONTINUES; -} - static inline u32 read_24(struct ata_device *drive) { return (IN_BYTE(IDE_HCYL_REG) << 16) | @@ -436,8 +288,8 @@ u8 err = 0; /* FIXME: --bzolnier */ - __save_flags (flags); /* local CPU only */ - ide__sti(); /* local CPU only */ + __save_flags(flags); + local_irq_enable(); printk("%s: %s: status=0x%02x", drive->name, msg, drive->status); dump_bits(ata_status_msgs, ARRAY_SIZE(ata_status_msgs), drive->status); @@ -485,62 +337,16 @@ #endif printk("\n"); } - __restore_flags (flags); /* local CPU only */ - return err; -} - -/* - * This gets invoked in response to a drive unexpectedly having its DRQ_STAT - * bit set. As an alternative to resetting the drive, it tries to clear the - * condition by reading a sector's worth of data from the drive. Of course, - * this may not help if the drive is *waiting* for data from *us*. - */ -static void try_to_flush_leftover_data(struct ata_device *drive) -{ - int i; - - if (drive->type != ATA_DISK) - return; - - for (i = (drive->mult_count ? 
drive->mult_count : 1); i > 0; --i) { - u32 buffer[SECTOR_WORDS]; + __restore_flags (flags); - ata_read(drive, buffer, SECTOR_WORDS); - } -} - -#ifdef CONFIG_BLK_DEV_PDC4030 -# define IS_PDC4030_DRIVE (drive->channel->chipset == ide_pdc4030) -#else -# define IS_PDC4030_DRIVE (0) /* auto-NULLs out pdc4030 code */ -#endif - -/* - * We are still on the old request path here so issuing the recalibrate command - * directly should just work. - */ -static int do_recalibrate(struct ata_device *drive) -{ - - if (drive->type != ATA_DISK) - return ATA_OP_FINISHED; - - if (!IS_PDC4030_DRIVE) { - struct ata_taskfile args; - - printk(KERN_INFO "%s: recalibrating...\n", drive->name); - memset(&args, 0, sizeof(args)); - args.taskfile.sector_count = drive->sect; - args.cmd = WIN_RESTORE; - ide_raw_taskfile(drive, &args, NULL); - printk(KERN_INFO "%s: done!\n", drive->name); - } - - return IS_PDC4030_DRIVE ? ATA_OP_FINISHED : ATA_OP_CONTINUES; + return err; } /* * Take action based on the error returned by the drive. + * + * FIXME: Separate the error handling code out and call it only in cases where + * we really wan't to try to recover from the error and not just reporting. */ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const char *msg) { @@ -549,12 +355,9 @@ err = ata_dump(drive, rq, msg); - /* FIXME: at least !drive check is bogus --bzolnier */ - if (!drive || !rq) - return ATA_OP_FINISHED; - - /* retry only "normal" I/O: */ - if (!(rq->flags & REQ_CMD)) { + /* Only try to recover from block I/O operations. + */ + if (!rq || !(rq->flags & REQ_CMD)) { rq->errors = 1; return ATA_OP_FINISHED; @@ -562,10 +365,11 @@ /* other bits are useless when BUSY */ if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) - rq->errors |= ERROR_RESET; /* FIXME: What's that?! 
*/ - else { - if (drive->type == ATA_DISK && (stat & ERR_STAT)) { - /* err has different meaning on cdrom and tape */ + rq->errors |= ERROR_RESET; + else if (drive->type == ATA_DISK) { + /* The error bit has different meaning on cdrom and tape. + */ + if (stat & ERR_STAT) { if (err == ABRT_ERR) { if (drive->select.b.lba && IN_BYTE(IDE_COMMAND_REG) == WIN_SPECIFY) return ATA_OP_FINISHED; /* some newer drives don't support WIN_SPECIFY */ @@ -573,109 +377,128 @@ drive->crc_count++; /* UDMA crc error -- just retry the operation */ else if (err & (BBD_ERR | ECC_ERR)) /* retries won't help these */ rq->errors = ERROR_MAX; - else if (err & TRK0_ERR) /* help it find track zero */ - rq->errors |= ERROR_RECAL; } - /* pre bio (rq->cmd != WRITE) */ - if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ) - try_to_flush_leftover_data(drive); + + /* As an alternative to resetting the drive, we try to clear + * the condition by reading a sector's worth of data from the + * drive. Of course, this can not help if the drive is + * *waiting* for data from *us*. + */ + + if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ) { + int i; + + for (i = (drive->mult_count ? drive->mult_count : 1); i > 0; --i) { + u32 buffer[SECTOR_WORDS]; + + ata_read(drive, buffer, SECTOR_WORDS); + } + } } + /* Force an abort if not even the status data is available. This will + * clear all pending IRQs on the drive as well. + */ if (!ata_status(drive, 0, BUSY_STAT | DRQ_STAT)) - OUT_BYTE(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); /* force an abort */ + OUT_BYTE(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); + /* Bail out immediately. 
*/ if (rq->errors >= ERROR_MAX) { printk(KERN_ERR "%s: max number of retries exceeded!\n", drive->name); if (ata_ops(drive) && ata_ops(drive)->end_request) ata_ops(drive)->end_request(drive, rq, 0); else - __ata_end_request(drive, rq, 0, 0); - } else { - ++rq->errors; - if ((rq->errors & ERROR_RESET) == ERROR_RESET) - return do_reset1(drive, 1); - if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) - /* FIXME: tries to acquire the channel lock -Zwane */ - return do_recalibrate(drive); - } - - return ATA_OP_FINISHED; -} - -/* - * This initiates handling of a new I/O request. - */ -static ide_startstop_t start_request(struct ata_device *drive, struct request *rq) -{ - struct ata_channel *ch = drive->channel; - sector_t block; - unsigned int minor = minor(rq->rq_dev); - unsigned int unit = minor >> PARTN_BITS; - ide_startstop_t ret; - - BUG_ON(!(rq->flags & REQ_STARTED)); - -#ifdef DEBUG - printk("%s: %s: current=0x%08lx\n", ch->name, __FUNCTION__, (unsigned long) rq); -#endif + ata_end_request(drive, rq, 0, 0); - /* bail early if we've exceeded max_failures */ - if (drive->max_failures && (drive->failures > drive->max_failures)) - goto kill_rq; - - if (unit >= MAX_DRIVES) { - printk(KERN_ERR "%s: bad device number: %s\n", ch->name, kdevname(rq->rq_dev)); - goto kill_rq; + return ATA_OP_FINISHED; } - block = rq->sector; + ++rq->errors; + printk(KERN_INFO "%s: request error, nr. %d\n", drive->name, rq->errors); - /* Strange disk manager remap. + /* + * Attempt to recover a confused drive by resetting it. Unfortunately, + * resetting a disk drive actually resets all devices on the same + * interface, so it can really be thought of as resetting the interface + * rather than resetting the drive. + * + * ATAPI devices have their own reset mechanism which allows them to be + * individually reset without clobbering other devices on the same + * interface. 
+ * + * The IDE interface does not generate an interrupt to let us know when + * the reset operation has finished, so we must poll for this. This + * may take a very long time to complete. + * + * Maybe we can check if we are in IRQ context and schedule the CPU + * during this time. But for certain we should block all data transfers + * on the channel in question during those operations. */ - if (rq->flags & REQ_CMD) - if (drive->type == ATA_DISK || drive->type == ATA_FLOPPY) - block += drive->sect0; - /* Yecch - this will shift the entire interval, possibly killing some - * innocent following sector. - */ - if (block == 0 && drive->remap_0_to_1 == 1) - block = 1; /* redirect MBR access to EZ-Drive partn table */ + if ((rq->errors & ERROR_RESET) == ERROR_RESET) { + unsigned int unit; + struct ata_channel *ch = drive->channel; + int ret; - ata_select(drive, 0); - ret = ata_status_poll(drive, drive->ready_stat, BUSY_STAT | DRQ_STAT, - WAIT_READY, rq); - if (ret != ATA_OP_READY) { - printk(KERN_WARNING "%s: drive not ready for command\n", drive->name); + /* For an ATAPI device, first try an ATAPI SRST. + */ - goto kill_rq; - } + if (drive->type != ATA_DISK) { + check_crc_errors(drive); + ata_select(drive, 20); + udelay(1); + ata_irq_enable(drive, 0); + OUT_BYTE(WIN_SRST, IDE_COMMAND_REG); + if (drive->quirk_list == 2) + ata_irq_enable(drive, 1); + udelay(1); + ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_WORSTCASE, NULL); + ata_mask(drive); + + if (ret == ATA_OP_READY) { + printk("%s: ATAPI reset complete\n", drive->name); + + return ATA_OP_CONTINUES; + } else + printk(KERN_ERR "%s: ATAPI reset timed out, status=0x%02x\n", + drive->name, drive->status); + } - if (!ata_ops(drive)) { - printk(KERN_WARNING "%s: device type %d not supported\n", - drive->name, drive->type); - goto kill_rq; - } + /* Reset all devices on channel. + */ - /* The normal way of execution is to pass and execute the request - * handler down to the device type driver. 
- */ + /* First, reset any device state data we were maintaining for + * any of the drives on this interface. + */ + for (unit = 0; unit < MAX_DRIVES; ++unit) + check_crc_errors(&ch->drives[unit]); - if (ata_ops(drive)->do_request) { - ret = ata_ops(drive)->do_request(drive, rq, block); - } else { - __ata_end_request(drive, rq, 0, 0); - ret = ATA_OP_FINISHED; - } - return ret; + /* And now actually perform the reset operation. + */ + printk("%s: ATA reset...\n", ch->name); + ata_select(drive, 20); + udelay(1); + ata_irq_enable(drive, 0); -kill_rq: - if (ata_ops(drive) && ata_ops(drive)->end_request) - ata_ops(drive)->end_request(drive, rq, 0); - else - __ata_end_request(drive, rq, 0, 0); + /* This command actually looks suspicious, since I couldn't + * find it in any standard document. + */ + OUT_BYTE(0x04, ch->io_ports[IDE_CONTROL_OFFSET]); + udelay(10); + OUT_BYTE(WIN_NOP, ch->io_ports[IDE_CONTROL_OFFSET]); + ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_WORSTCASE, NULL); + ata_mask(drive); + + if (ret == ATA_OP_READY) + printk("%s: ATA reset complete\n", drive->name); + else + printk(KERN_ERR "%s: ATA reset timed out, status=0x%02x\n", + drive->name, drive->status); + mdelay(100); + } - return ATA_OP_FINISHED; + /* signal that we should retry this request */ + return ATA_OP_CONTINUES; } /* @@ -689,218 +512,256 @@ drive->sleep = timeout + jiffies; } - /* - * Determine the longest sleep time for the devices at this channel. + * Issue a new request. + * Caller must have already done spin_lock_irqsave(channel->lock, ...) */ -static unsigned long longest_sleep(struct ata_channel *channel) +static void do_request(struct ata_channel *channel) { - unsigned long sleep = 0; - int unit; - - for (unit = 0; unit < MAX_DRIVES; ++unit) { - struct ata_device *drive = &channel->drives[unit]; - - if (!drive->present) - continue; - - /* This device is sleeping and waiting to be serviced - * later than any other device we checked thus far. 
- */ - if (drive->sleep && (!sleep || time_after(drive->sleep, sleep))) - sleep = drive->sleep; - } + struct ata_channel *ch; + struct ata_device *drive = NULL; + unsigned int unit; + ide_startstop_t ret; - return sleep; -} + local_irq_disable(); /* necessary paranoia */ -/* - * Select the next device which will be serviced. This selects only between - * devices on the same channel, since everything else will be scheduled on the - * queue level. - */ -static struct ata_device *choose_urgent_device(struct ata_channel *channel) -{ - struct ata_device *choice = NULL; - unsigned long sleep = 0; - int unit; + /* + * Select the next device which will be serviced. This selects + * only between devices on the same channel, since everything + * else will be scheduled on the queue level. + */ for (unit = 0; unit < MAX_DRIVES; ++unit) { - struct ata_device *drive = &channel->drives[unit]; + struct ata_device *tmp = &channel->drives[unit]; - if (!drive->present) + if (!tmp->present) continue; - /* There are no request pending for this device. + /* There are no requests pending for this device. */ - if (blk_queue_empty(&drive->queue)) + if (blk_queue_empty(&tmp->queue)) continue; + /* This device still wants to remain idle. */ - if (drive->sleep && time_after(drive->sleep, jiffies)) + if (tmp->sleep && time_after(tmp->sleep, jiffies)) continue; - /* Take this device, if there is no device choosen thus far or - * it's more urgent. + /* Take this device, if there is no device choosen thus + * far or which is more urgent. 
*/ - if (!choice || (drive->sleep && (!choice->sleep || time_after(choice->sleep, drive->sleep)))) { - if (!blk_queue_plugged(&drive->queue)) - choice = drive; + if (!drive || (tmp->sleep && (!drive->sleep || time_after(drive->sleep, tmp->sleep)))) { + if (!blk_queue_plugged(&tmp->queue)) + drive = tmp; } } - if (choice) - return choice; + if (!drive) { + unsigned long sleep = 0; - sleep = longest_sleep(channel); + for (unit = 0; unit < MAX_DRIVES; ++unit) { + struct ata_device *tmp = &channel->drives[unit]; - if (sleep) { + if (!tmp->present) + continue; - /* - * Take a short snooze, and then wake up again. Just in case - * there are big differences in relative throughputs.. don't - * want to hog the cpu too much. - */ + /* This device is sleeping and waiting to be serviced + * earlier than any other device we checked thus far. + */ + if (tmp->sleep && (!sleep || time_after(sleep, tmp->sleep))) + sleep = tmp->sleep; + } + + if (sleep) { + /* + * Take a short snooze, and then wake up again. Just + * in case there are big differences in relative + * throughputs.. don't want to hog the cpu too much. + */ - if (time_after(jiffies, sleep - WAIT_MIN_SLEEP)) - sleep = jiffies + WAIT_MIN_SLEEP; + if (time_after(jiffies, sleep - WAIT_MIN_SLEEP)) + sleep = jiffies + WAIT_MIN_SLEEP; #if 1 - if (timer_pending(&channel->timer)) - printk(KERN_ERR "%s: timer already active\n", __FUNCTION__); + if (timer_pending(&channel->timer)) + printk(KERN_ERR "%s: timer already active\n", __FUNCTION__); #endif - set_bit(IDE_SLEEP, channel->active); - mod_timer(&channel->timer, sleep); - /* we purposely leave hwgroup busy while sleeping */ - } else { - /* FIXME: use queue plugging instead of active to - * block upper layers from stomping on us */ - /* Ugly, but how can we sleep for the lock otherwise? 
*/ - ide_release_lock(&ide_irq_lock);/* for atari only */ - clear_bit(IDE_BUSY, channel->active); - } + set_bit(IDE_SLEEP, channel->active); + mod_timer(&channel->timer, sleep); - return NULL; -} + /* + * We purposely leave us busy while sleeping becouse we + * are prepared to handle the IRQ from it. + * + * FIXME: Make sure sleeping can't interferre with + * operations of other devices on the same channel. + */ + } else { + /* FIXME: use queue plugging instead of active to block + * upper layers from stomping on us */ + /* Ugly, but how can we sleep for the lock otherwise? + * */ + + ide_release_lock(&ide_irq_lock);/* for atari only */ + clear_bit(IDE_BUSY, channel->active); + + /* All requests are done. + * + * Disable IRQs from the last drive on this channel, to + * make sure that it wan't throw stones at us when we + * are not prepared to take them. + */ -/* - * Issue a new request. - * Caller must have already done spin_lock_irqsave(channel->lock, ...) - */ -static void do_request(struct ata_channel *channel) -{ - ide_get_lock(&ide_irq_lock, ata_irq_request, channel);/* for atari only: POSSIBLY BROKEN HERE(?) */ - __cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */ + if (channel->drive && !channel->drive->using_tcq) + ata_irq_enable(channel->drive, 0); + } - while (!test_and_set_bit(IDE_BUSY, channel->active)) { - struct ata_channel *ch; - struct ata_device *drive; - struct request *rq = NULL; - ide_startstop_t startstop; - int i; + return; + } - /* this will clear IDE_BUSY, if appropriate */ - drive = choose_urgent_device(channel); + /* Remember the last drive we where acting on. + */ + ch = drive->channel; + ch->drive = drive; - if (!drive) - break; + /* Feed commands to a drive until it barfs. + */ + do { + struct request *rq = NULL; + sector_t block; - /* Remember the last drive we where acting on. + /* Abort early if we can't queue another command. 
for non tcq, + * ata_can_queue is always 1 since we never get here unless the + * drive is idle. */ - ch = drive->channel; - ch->drive = drive; - /* Make sure that all drives on channels sharing the IRQ line - * with us won't generate IRQ's during our activity. - */ - for (i = 0; i < MAX_HWIFS; ++i) { - struct ata_channel *tmp = &ide_hwifs[i]; - int j; + if (!ata_can_queue(drive)) { + if (!ata_pending_commands(drive)) { + clear_bit(IDE_BUSY, ch->active); + if (drive->using_tcq) + ata_irq_enable(drive, 0); + } + break; + } - if (!tmp->present) - continue; + drive->sleep = 0; - if (ch->lock != tmp->lock) - continue; + if (test_bit(IDE_DMA, ch->active)) { + printk(KERN_ERR "%s: error: DMA in progress...\n", drive->name); + break; + } - /* Only care if there is any drive on the channel in - * question. - */ - for (j = 0; j < MAX_DRIVES; ++j) { - struct ata_device * other = &tmp->drives[j]; + /* There's a small window between where the queue could be + * replugged while we are in here when using tcq (in which case + * the queue is probably empty anyways...), so check and leave + * if appropriate. When not using tcq, this is still a severe + * BUG! + */ - if (other->present) - ata_irq_enable(other, 0); + if (blk_queue_plugged(&drive->queue)) { + BUG_ON(!drive->using_tcq); + break; + } + + if (!(rq = elv_next_request(&drive->queue))) { + if (!ata_pending_commands(drive)) { + clear_bit(IDE_BUSY, ch->active); + if (drive->using_tcq) + ata_irq_enable(drive, 0); } + drive->rq = NULL; + + break; } + /* If there are queued commands, we can't start a + * non-fs request (really, a non-queuable command) + * until the queue is empty. + */ + if (!(rq->flags & REQ_CMD) && ata_pending_commands(drive)) + break; + + drive->rq = rq; + + spin_unlock(ch->lock); + /* allow other IRQs while we start this request */ + local_irq_enable(); + /* - * Feed commands to a drive until it barfs. + * This initiates handling of a new I/O request. 
*/ - do { - if (!test_bit(IDE_BUSY, ch->active)) - printk(KERN_ERR "%s: error: not busy while queueing!\n", drive->name); - - /* Abort early if we can't queue another command. for - * non tcq, ata_can_queue is always 1 since we never - * get here unless the drive is idle. - */ - if (!ata_can_queue(drive)) { - if (!ata_pending_commands(drive)) - clear_bit(IDE_BUSY, ch->active); - break; - } - drive->sleep = 0; + BUG_ON(!(rq->flags & REQ_STARTED)); - if (test_bit(IDE_DMA, ch->active)) { - printk(KERN_ERR "%s: error: DMA in progress...\n", drive->name); - break; - } +#ifdef DEBUG + printk("%s: %s: current=0x%08lx\n", ch->name, __FUNCTION__, (unsigned long) rq); +#endif - /* There's a small window between where the queue could - * be replugged while we are in here when using tcq (in - * which case the queue is probably empty anyways...), - * so check and leave if appropriate. When not using - * tcq, this is still a severe BUG! - */ - if (blk_queue_plugged(&drive->queue)) { - BUG_ON(!drive->using_tcq); - break; - } + /* bail early if we've exceeded max_failures */ + if (drive->max_failures && (drive->failures > drive->max_failures)) + goto kill_rq; - if (!(rq = elv_next_request(&drive->queue))) { - if (!ata_pending_commands(drive)) - clear_bit(IDE_BUSY, ch->active); - drive->rq = NULL; - break; - } + block = rq->sector; - /* If there are queued commands, we can't start a - * non-fs request (really, a non-queuable command) - * until the queue is empty. - */ - if (!(rq->flags & REQ_CMD) && ata_pending_commands(drive)) - break; + /* Strange disk manager remap. + */ + if (rq->flags & REQ_CMD) + if (drive->type == ATA_DISK || drive->type == ATA_FLOPPY) + block += drive->sect0; + + /* Yecch - this will shift the entire interval, possibly killing some + * innocent following sector. 
+ */ + if (block == 0 && drive->remap_0_to_1 == 1) + block = 1; /* redirect MBR access to EZ-Drive partn table */ + + ata_select(drive, 0); + ret = ata_status_poll(drive, drive->ready_stat, BUSY_STAT | DRQ_STAT, + WAIT_READY, rq); + + if (ret != ATA_OP_READY) { + printk(KERN_ERR "%s: drive not ready for command\n", drive->name); + + goto kill_rq; + } + + if (!ata_ops(drive)) { + printk(KERN_WARNING "%s: device type %d not supported\n", + drive->name, drive->type); + goto kill_rq; + } + + /* The normal way of execution is to pass and execute the request + * handler down to the device type driver. + */ + + if (ata_ops(drive)->do_request) { + ret = ata_ops(drive)->do_request(drive, rq, block); + } else { +kill_rq: + if (ata_ops(drive) && ata_ops(drive)->end_request) + ata_ops(drive)->end_request(drive, rq, 0); + else + ata_end_request(drive, rq, 0, 0); + ret = ATA_OP_FINISHED; - drive->rq = rq; + } + spin_lock_irq(ch->lock); - spin_unlock(ch->lock); - ide__sti(); /* allow other IRQs while we start this request */ - startstop = start_request(drive, rq); - spin_lock_irq(ch->lock); - - /* command started, we are busy */ - } while (startstop != ATA_OP_CONTINUES); - /* make sure the BUSY bit is set */ - /* FIXME: perhaps there is some place where we miss to set it? */ + /* continue if command started, so we are busy */ + } while (ret != ATA_OP_CONTINUES); + /* make sure the BUSY bit is set */ + /* FIXME: perhaps there is some place where we miss to set it? */ // set_bit(IDE_BUSY, ch->active); - } } void do_ide_request(request_queue_t *q) { - do_request(q->queuedata); + struct ata_channel *ch = q->queuedata; + + while (!test_and_set_bit(IDE_BUSY, ch->active)) { + do_request(ch); + } } /* @@ -908,7 +769,8 @@ * also be invoked as a result of a "sleep" operation triggered by the * mod_timer() call in do_request. * - * FIXME: this should take a drive context instead of a channel. + * FIXME: This should take a drive context instead of a channel. 
+ * FIXME: This should not explicitly reenter the request handling engine. */ void ide_timer_expiry(unsigned long data) { @@ -928,6 +790,8 @@ * as timer expired), or we were "sleeping" to give other * devices a chance. Either way, we don't really want to * complain about anything. + * + * FIXME: Do we really still have to clear IDE_BUSY here? */ if (test_and_clear_bit(IDE_SLEEP, ch->active)) @@ -975,11 +839,11 @@ #else disable_irq(ch->irq); /* disable_irq_nosync ?? */ #endif - /* FIXME: IRQs are already disabled by spin_lock_irqsave() --bzolnier */ - __cli(); /* local CPU only, as if we were handling an interrupt */ + + local_irq_disable(); if (ch->poll_timeout) { ret = handler(drive, drive->rq); - } else if (drive_is_ready(drive)) { + } else if (ata_status_irq(drive)) { if (test_bit(IDE_DMA, ch->active)) udma_irq_lost(drive); (void) ide_ack_intr(ch); @@ -1026,11 +890,10 @@ enable_irq(ch->irq); spin_lock_irq(ch->lock); - if (ret == ATA_OP_FINISHED) - clear_bit(IDE_BUSY, ch->active); - - /* Reenter the request handling engine */ - do_request(ch); + if (ret == ATA_OP_FINISHED) { + /* Reenter the request handling engine. */ + do_request(ch); + } } spin_unlock_irqrestore(ch->lock, flags); } @@ -1045,7 +908,7 @@ * drive enters "idle", "standby", or "sleep" mode, so if the status looks * "good", we just ignore the interrupt completely. * - * This routine assumes __cli() is in effect when called. + * This routine assumes IRQ are disabled on entry. 
* * If an unexpected interrupt happens on irq15 while we are handling irq14 * and if the two interfaces are "serialized" (CMD640), then it looks like @@ -1060,38 +923,41 @@ */ static void unexpected_irq(int irq) { + /* Try to not flood the console with msgs */ + static unsigned long last_msgtime; /* = 0 */ + static int count; /* = 0 */ int i; for (i = 0; i < MAX_HWIFS; ++i) { struct ata_channel *ch = &ide_hwifs[i]; + int j; struct ata_device *drive; - if (!ch->present) + if (!ch->present || ch->irq != irq) continue; - if (ch->irq != irq) - continue; - - /* FIXME: this is a bit weak */ - drive = &ch->drives[0]; + for (j = 0; j < MAX_DRIVES; ++j) { + drive = &ch->drives[j]; - if (!ata_status(drive, READY_STAT, BAD_STAT)) { - /* Try to not flood the console with msgs */ - static unsigned long last_msgtime; - static int count; + /* this drive is idle */ + if (ata_status(drive, READY_STAT, BAD_STAT)) + continue; ++count; - if (time_after(jiffies, last_msgtime + HZ)) { - last_msgtime = jiffies; - printk("%s: unexpected interrupt, status=0x%02x, count=%d\n", - ch->name, drive->status, count); - } + + /* don't report too frequently */ + if (!time_after(jiffies, last_msgtime + HZ)) + continue; + + last_msgtime = jiffies; + printk("%s: unexpected interrupt, status=0x%02x, count=%d\n", + ch->name, drive->status, count); } } } /* - * Entry point for all interrupts, caller does __cli() for us. + * Entry point for all interrupts. Aussumes disabled IRQs. 
*/ void ata_irq_request(int irq, void *data, struct pt_regs *regs) { @@ -1099,7 +965,7 @@ unsigned long flags; struct ata_device *drive; ata_handler_t *handler; - ide_startstop_t startstop; + ide_startstop_t ret; spin_lock_irqsave(ch->lock, flags); @@ -1107,80 +973,87 @@ goto out_lock; handler = ch->handler; - if (handler == NULL || ch->poll_timeout != 0) { + drive = ch->drive; + if (!handler || ch->poll_timeout) { #if 0 printk(KERN_INFO "ide: unexpected interrupt %d %d\n", ch->unit, irq); #endif + /* - * Not expecting an interrupt from this drive. - * That means this could be: - * (1) an interrupt from another PCI device - * sharing the same PCI INT# as us. - * or (2) a drive just entered sleep or standby mode, - * and is interrupting to let us know. - * or (3) a spurious interrupt of unknown origin. + * Not expecting an interrupt from this drive. That means this + * could be: * - * For PCI, we cannot tell the difference, - * so in that case we just ignore it and hope it goes away. + * - an interrupt from another PCI device sharing the same PCI + * INT# as us. + * + * - a drive just entered sleep or standby mode, and is + * interrupting to let us know. + * + * - a spurious interrupt of unknown origin. + * + * For PCI, we cannot tell the difference, so in that case we + * just clear it and hope it goes away. */ + #ifdef CONFIG_PCI if (ch->pci_dev && !ch->pci_dev->vendor) #endif - { - /* Probably not a shared PCI interrupt, so we can - * safely try to do something about it: - */ unexpected_irq(irq); #ifdef CONFIG_PCI - } else { - /* - * Whack the status register, just in case we have a leftover pending IRQ. - */ - IN_BYTE(ch->io_ports[IDE_STATUS_OFFSET]); + else + ata_status(drive, READY_STAT, BAD_STAT); #endif - } + goto out_lock; } - drive = ch->drive; - if (!drive_is_ready(drive)) { - /* - * This happens regularly when we share a PCI IRQ with another device. + if (!ata_status_irq(drive)) { + /* This happens regularly when we share a PCI IRQ with another device. 
* Unfortunately, it can also happen with some buggy drives that trigger * the IRQ before their status register is up to date. Hopefully we have * enough advance overhead that the latter isn't a problem. */ + goto out_lock; } + /* paranoia */ if (!test_and_set_bit(IDE_BUSY, ch->active)) - printk(KERN_ERR "%s: %s: hwgroup was not busy!?\n", drive->name, __FUNCTION__); + printk(KERN_ERR "%s: %s: channel was not busy!?\n", drive->name, __FUNCTION__); + ch->handler = NULL; del_timer(&ch->timer); spin_unlock(ch->lock); if (ch->unmask) - ide__sti(); + local_irq_enable(); - /* service this interrupt, may set handler for next interrupt */ - startstop = handler(drive, drive->rq); + /* + * Service this interrupt, this may setup handler for next interrupt. + */ + ret = handler(drive, drive->rq); spin_lock_irq(ch->lock); /* - * Note that handler() may have set things up for another - * interrupt to occur soon, but it cannot happen until - * we exit from this routine, because it will be the - * same irq as is currently being serviced here, and Linux - * won't allow another of the same (on any CPU) until we return. + * Note that handler() may have set things up for another interrupt to + * occur soon, but it cannot happen until we exit from this routine, + * because it will be the same irq as is currently being serviced here, + * and Linux won't allow another of the same (on any CPU) until we + * return. */ - if (startstop == ATA_OP_FINISHED) { - if (!ch->handler) { /* paranoia */ - clear_bit(IDE_BUSY, ch->active); + + if (ret == ATA_OP_FINISHED) { + + /* Reenter the request handling engine if we are not expecting + * another interrupt. + */ + + if (!ch->handler) do_request(ch); - } else { - printk("%s: %s: huh? expected NULL handler on exit\n", drive->name, __FUNCTION__); - } + else + printk("%s: %s: huh? 
expected NULL handler on exit\n", + drive->name, __FUNCTION__); } out_lock: @@ -1202,7 +1075,7 @@ */ #ifdef CONFIG_KMOD - if (drive->driver == NULL) { + if (!drive->driver) { char *module = NULL; switch (drive->type) { @@ -1263,6 +1136,7 @@ drive->usage--; if (ata_ops(drive) && ata_ops(drive)->release) ata_ops(drive)->release(inode, file, drive); + return 0; } @@ -1313,12 +1187,12 @@ } struct block_device_operations ide_fops[] = {{ - owner: THIS_MODULE, - open: ide_open, - release: ide_release, - ioctl: ata_ioctl, - check_media_change: ide_check_media_change, - revalidate: ata_revalidate + .owner = THIS_MODULE, + .open = ide_open, + .release = ide_release, + .ioctl = ata_ioctl, + .check_media_change = ide_check_media_change, + .revalidate = ata_revalidate }}; EXPORT_SYMBOL(ide_fops); @@ -1332,7 +1206,7 @@ EXPORT_SYMBOL(ata_dump); EXPORT_SYMBOL(ata_error); -EXPORT_SYMBOL(__ata_end_request); +EXPORT_SYMBOL(ata_end_request); EXPORT_SYMBOL(ide_stall_queue); EXPORT_SYMBOL(ide_setup_ports); diff -Nru a/drivers/ide/it8172.c b/drivers/ide/it8172.c --- a/drivers/ide/it8172.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/it8172.c Sat Jul 20 12:12:34 2002 @@ -225,12 +225,12 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_ITE, - device: PCI_DEVICE_ID_ITE_IT8172G, - init_chipset: pci_init_it8172, - init_channel: ide_init_it8172, - exnablebits: {{0x00,0x00,0x00}, {0x40,0x00,0x01} }, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_ITE, + .device = PCI_DEVICE_ID_ITE_IT8172G, + .init_chipset = pci_init_it8172, + .init_channel = ide_init_it8172, + .exnablebits = {{0x00,0x00,0x00}, {0x40,0x00,0x01} }, + .bootable = ON_BOARD }; int __init init_it8172(void) diff -Nru a/drivers/ide/main.c b/drivers/ide/main.c --- a/drivers/ide/main.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/main.c Sat Jul 20 12:12:34 2002 @@ -140,12 +140,9 @@ * Setup hw_regs_t structure described by parameters. 
You may set up the hw * structure yourself OR use this routine to do it for you. */ -void ide_setup_ports(hw_regs_t *hw, - ide_ioreg_t base, - int *offsets, - ide_ioreg_t ctrl, - ide_ioreg_t intr, - ide_ack_intr_t *ack_intr, +void ide_setup_ports(hw_regs_t *hw, ide_ioreg_t base, int *offsets, + ide_ioreg_t ctrl, ide_ioreg_t intr, + int (*ack_intr)(struct ata_channel *), int irq) { int i; @@ -638,17 +635,6 @@ */ /* - * stridx() returns the offset of c within s, - * or -1 if c is '\0' or not found within s. - */ -static int __init stridx (const char *s, char c) -{ - char *i = strchr(s, c); - - return (i && c) ? i - s : -1; -} - -/* * Parsing for ide_setup(): * * 1. the first char of s must be '='. @@ -657,14 +643,14 @@ * 3. if the remainder is a series of no more than max_vals numbers * separated by commas, the numbers are saved in vals[] and a * count of how many were saved is returned. Base10 is assumed, - * and base16 is allowed when prefixed with "0x". + * and base16 is allowed when prefixed with "0x". The number of + * values read will be placed in vals[0], and the values read will + * placed in vals[1] to vals[max_vals]. * 4. otherwise, zero is returned. */ static int __init match_parm (char *s, const char *keywords[], int vals[], int max_vals) { - static const char decimal[] = "0123456789"; - static const char hex[] = "0123456789abcdef"; - int i, n; + int i; if (*s++ == '=') { /* @@ -683,23 +669,10 @@ * or base16 when prefixed with "0x". * Return a count of how many were found. 
*/ - for (n = 0; (i = stridx(decimal, *s)) >= 0;) { - vals[n] = i; - while ((i = stridx(decimal, *++s)) >= 0) - vals[n] = (vals[n] * 10) + i; - if (*s == 'x' && !vals[n]) { - while ((i = stridx(hex, *++s)) >= 0) - vals[n] = (vals[n] * 0x10) + i; - } - if (++n == max_vals) - break; - if (*s == ',' || *s == ';') - ++s; - } - if (!*s) - return n; + get_options(s, max_vals+1, vals); + return vals[0]; } - return 0; /* zero = nothing matched */ + return 0; } /* @@ -744,7 +717,7 @@ */ int __init ide_setup(char *s) { - int i, vals[3]; + int i, vals[4]; struct ata_channel *ch; struct ata_device *drive; unsigned int hw, unit; @@ -755,7 +728,6 @@ return 0; if (strncmp(s,"ide",3) && - strncmp(s,"idebus",6) && strncmp(s,"hd",2)) /* hdx= & hdxlun= */ return 0; @@ -801,24 +773,24 @@ unit = unit % MAX_DRIVES; ch = &ide_hwifs[hw]; drive = &ch->drives[unit]; - if (!strncmp(s + 4, "ide-", 4)) { + if (!strncmp(s+3, "=ide-", 5)) { strncpy(drive->driver_req, s + 4, 9); goto done; } /* * Look for last lun option: "hdxlun=" */ - if (!strncmp(&s[3], "lun", 3)) { - if (match_parm(&s[6], NULL, vals, 1) != 1) + if (!strncmp(s+3, "lun=", 4)) { + if (*get_options(s+7, 2, vals) || vals[0]!=1) goto bad_option; - if (vals[0] >= 0 && vals[0] <= 7) { - drive->last_lun = vals[0]; + if (vals[1] >= 0 && vals[1] <= 7) { + drive->last_lun = vals[1]; drive->forced_lun = 1; } else printk(" -- BAD LAST LUN! 
Expected value from 0 to 7"); goto done; } - switch (match_parm(&s[3], hd_words, vals, 3)) { + switch (match_parm(s+3, hd_words, vals, 3)) { case -1: /* "none" */ drive->nobios = 1; /* drop into "noprobe" */ case -2: /* "noprobe" */ @@ -864,9 +836,9 @@ #endif case 3: /* cyl,head,sect */ drive->type = ATA_DISK; - drive->cyl = drive->bios_cyl = vals[0]; - drive->head = drive->bios_head = vals[1]; - drive->sect = drive->bios_sect = vals[2]; + drive->cyl = drive->bios_cyl = vals[1]; + drive->head = drive->bios_head = vals[2]; + drive->sect = drive->bios_sect = vals[3]; drive->present = 1; drive->forced_geom = 1; ch->noprobe = 0; @@ -879,10 +851,10 @@ /* * Look for bus speed option: "idebus=" */ - if (!strncmp(s, "idebus", 6)) { - if (match_parm(&s[6], NULL, vals, 1) != 1) + if (!strncmp(s, "idebus=", 7)) { + if (*get_options(s+7, 2, vals) || vals[0] != 1) goto bad_option; - idebus_parameter = vals[0]; + idebus_parameter = vals[1]; goto done; } @@ -892,33 +864,72 @@ if (!strncmp(s, "ide", 3) && s[3] >= '0' && s[3] <= max_ch) { /* * Be VERY CAREFUL changing this: note hardcoded indexes below - * -8,-9,-10. -11 : are reserved for future idex calls to ease the hardcoding. 
*/ + const char *ide_options[] = { + "noprobe", "serialize", "autotune", "noautotune", "reset", "dma", "ata66", NULL }; const char *ide_words[] = { - "noprobe", "serialize", "autotune", "noautotune", "reset", "dma", "ata66", - "minus8", "minus9", "minus10", "minus11", "qd65xx", "ht6560b", "cmd640_vlb", "dtc2278", "umc8672", "ali14xx", "dc4030", NULL }; hw = s[3] - '0'; ch = &ide_hwifs[hw]; + + switch (match_parm(s+4, ide_options, vals, 1)) { + case -7: /* ata66 */ +#ifdef CONFIG_PCI + ch->udma_four = 1; + goto done; +#else + ch->udma_four = 0; + goto bad_channel; +#endif + case -6: /* dma */ + ch->autodma = 1; + goto done; + case -5: /* reset */ + ch->reset = 1; + goto done; + case -4: /* noautotune */ + ch->drives[0].autotune = 2; + ch->drives[1].autotune = 2; + goto done; + case -3: /* autotune */ + ch->drives[0].autotune = 1; + ch->drives[1].autotune = 1; + goto done; + case -2: /* "serialize" */ + do_serialize: + { + struct ata_channel *mate; + + mate = &ide_hwifs[hw ^ 1]; + ch->serialized = 1; + mate->serialized = 1; + } + goto done; + + case -1: /* "noprobe" */ + ch->noprobe = 1; + goto done; + } + i = match_parm(&s[4], ide_words, vals, 3); /* * Cryptic check to ensure chipset not already set for a channel: */ - if (i > 0 || i <= -11) { /* is parameter a chipset name? */ - if (ch->chipset != ide_unknown) + if (i) { /* is parameter a chipset name? 
*/ + if (ide_hwifs[hw].chipset != ide_unknown) goto bad_option; /* chipset already specified */ - if (i <= -11 && i != -18 && hw != 0) + if (i != -7 && hw != 0) goto bad_channel; /* chipset drivers are for "ide0=" only */ - if (i <= -11 && i != -18 && ide_hwifs[hw+1].chipset != ide_unknown) + if (i != -7 && ide_hwifs[1].chipset != ide_unknown) goto bad_option; /* chipset for 2nd port already specified */ printk("\n"); } switch (i) { #ifdef CONFIG_BLK_DEV_PDC4030 - case -18: /* "dc4030" */ + case -7: /* "dc4030" */ { extern void init_pdc4030(void); init_pdc4030(); @@ -926,7 +937,7 @@ } #endif #ifdef CONFIG_BLK_DEV_ALI14XX - case -17: /* "ali14xx" */ + case -6: /* "ali14xx" */ { extern void init_ali14xx (void); init_ali14xx(); @@ -934,7 +945,7 @@ } #endif #ifdef CONFIG_BLK_DEV_UMC8672 - case -16: /* "umc8672" */ + case -5: /* "umc8672" */ { extern void init_umc8672 (void); init_umc8672(); @@ -942,7 +953,7 @@ } #endif #ifdef CONFIG_BLK_DEV_DTC2278 - case -15: /* "dtc2278" */ + case -4: /* "dtc2278" */ { extern void init_dtc2278 (void); init_dtc2278(); @@ -950,7 +961,7 @@ } #endif #ifdef CONFIG_BLK_DEV_CMD640 - case -14: /* "cmd640_vlb" */ + case -3: /* "cmd640_vlb" */ { extern int cmd640_vlb; /* flag for cmd640.c */ cmd640_vlb = 1; @@ -958,7 +969,7 @@ } #endif #ifdef CONFIG_BLK_DEV_HT6560B - case -13: /* "ht6560b" */ + case -2: /* "ht6560b" */ { extern void init_ht6560b (void); init_ht6560b(); @@ -966,64 +977,22 @@ } #endif #if CONFIG_BLK_DEV_QD65XX - case -12: /* "qd65xx" */ + case -1: /* "qd65xx" */ { extern void init_qd65xx (void); init_qd65xx(); goto done; } #endif - case -11: /* minus11 */ - case -10: /* minus10 */ - case -9: /* minus9 */ - case -8: /* minus8 */ - goto bad_option; - case -7: /* ata66 */ -#ifdef CONFIG_PCI - ch->udma_four = 1; - goto done; -#else - ch->udma_four = 0; - goto bad_channel; -#endif - case -6: /* dma */ - ch->autodma = 1; - goto done; - case -5: /* reset */ - ch->reset = 1; - goto done; - case -4: /* noautotune */ - 
ch->drives[0].autotune = 2; - ch->drives[1].autotune = 2; - goto done; - case -3: /* autotune */ - ch->drives[0].autotune = 1; - ch->drives[1].autotune = 1; - goto done; - case -2: /* "serialize" */ - do_serialize: - { - struct ata_channel *mate; - - mate = &ide_hwifs[hw ^ 1]; - ch->serialized = 1; - mate->serialized = 1; - } - goto done; - - case -1: /* "noprobe" */ - ch->noprobe = 1; - goto done; - case 1: /* base */ - vals[1] = vals[0] + 0x206; /* default ctl */ + vals[2] = vals[1] + 0x206; /* default ctl */ case 2: /* base,ctl */ - vals[2] = 0; /* default irq = probe for it */ + vals[3] = 0; /* default irq = probe for it */ case 3: /* base,ctl,irq */ - ch->hw.irq = vals[2]; - ide_init_hwif_ports(&ch->hw, (ide_ioreg_t) vals[0], (ide_ioreg_t) vals[1], &ch->irq); + ch->hw.irq = vals[3]; + ide_init_hwif_ports(&ch->hw, (ide_ioreg_t) vals[1], (ide_ioreg_t) vals[2], &ch->irq); memcpy(ch->io_ports, ch->hw.io_ports, sizeof(ch->io_ports)); - ch->irq = vals[2]; + ch->irq = vals[3]; ch->noprobe = 0; ch->chipset = ide_generic; goto done; diff -Nru a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c --- a/drivers/ide/ns87415.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/ns87415.c Sat Jul 20 12:12:35 2002 @@ -40,8 +40,7 @@ struct pci_dev *dev = hwif->pci_dev; unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); new = *old; /* Adjust IRQ enable bit */ @@ -75,7 +74,7 @@ udelay(10); } - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); } static void ns87415_selectproc(struct ata_device *drive) @@ -215,10 +214,10 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_NS, - device: PCI_DEVICE_ID_NS_87415, - init_channel: ide_init_ns87415, - bootable: ON_BOARD, + .vendor = PCI_VENDOR_ID_NS, + .device = PCI_DEVICE_ID_NS_87415, + .init_channel = ide_init_ns87415, + .bootable = ON_BOARD, }; int __init init_ns87415(void) diff -Nru 
a/drivers/ide/opti621.c b/drivers/ide/opti621.c --- a/drivers/ide/opti621.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/opti621.c Sat Jul 20 12:12:34 2002 @@ -323,18 +323,18 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_OPTI, - device: PCI_DEVICE_ID_OPTI_82C621, - init_channel: ide_init_opti621, - enablebits: {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_OPTI, + .device = PCI_DEVICE_ID_OPTI_82C621, + .init_channel = ide_init_opti621, + .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_OPTI, - device: PCI_DEVICE_ID_OPTI_82C825, - init_channel: ide_init_opti621, - enablebits: {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_OPTI, + .device = PCI_DEVICE_ID_OPTI_82C825, + .init_channel = ide_init_opti621, + .enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}}, + .bootable = ON_BOARD }, }; diff -Nru a/drivers/ide/pcidma.c b/drivers/ide/pcidma.c --- a/drivers/ide/pcidma.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/pcidma.c Sat Jul 20 12:12:35 2002 @@ -46,7 +46,7 @@ if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) { if (!dma_stat) { - __ata_end_request(drive, rq, 1, rq->nr_sectors); + ata_end_request(drive, rq, 1, rq->nr_sectors); return ATA_OP_FINISHED; } @@ -510,7 +510,7 @@ void udma_pci_timeout(struct ata_device *drive) { - printk(KERN_ERR "ATA: UDMA timeout occured %s!\n", drive->name); + printk(KERN_ERR "%s: UDMA timeout!\n", drive->name); } void udma_pci_irq_lost(struct ata_device *drive) diff -Nru a/drivers/ide/pdc202xx.c b/drivers/ide/pdc202xx.c --- a/drivers/ide/pdc202xx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/pdc202xx.c Sat Jul 20 12:12:35 2002 @@ -3,10 +3,11 @@ * linux/drivers/ide/pdc202xx.c Version 0.30 May. 
28, 2002 * * Copyright (C) 1998-2000 Andre Hedrick - * Copyright (C) 2002 Bartlomiej Zolnierkiewicz + * Copyright (C) 2002 BartÅ^Âomiej Å»oÅ^Ânierkiewicz * - * Portions Copyright (C) 1999 Promise Technology, Inc. - * Author: Frank Tiernan (frankt@promise.com) + * Portions Copyright (C) 1999-2002 Promise Technology, Inc. + * Author: Frank Tiernan + * Hank Yang * * May be copied or modified under the terms of the GNU General Public License * @@ -22,11 +23,21 @@ * * The latest chipset code will support the following :: * Three Ultra33 controllers and 12 drives. + * * 8 are UDMA supported and 4 are limited to DMA mode 2 multi-word. * The 8/4 ratio is a BIOS code limit by promise. * * UNLESS you enable "CONFIG_PDC202XX_BURST" * + * History: + * Sync 2.5 driver with Promise 2.4 driver v1.20.0.7 (07/11/02): + * - Add PDC20271 support + * - Disable LBA48 support on PDC20262 + * - Fix ATAPI UDMA port value + * - Add new quirk drive + * - Adjust timings for all drives when using ATA133 + * - Update pdc202xx_reset() waiting time + * */ #include @@ -73,24 +84,24 @@ }; static struct pdc_bit_messages pdc_reg_A[] = { - { 0x80, "SYNC_IN" }, - { 0x40, "ERRDY_EN" }, - { 0x20, "IORDY_EN" }, - { 0x10, "PREFETCH_EN" }, + {0x80, "SYNC_IN"}, + {0x40, "ERRDY_EN"}, + {0x20, "IORDY_EN"}, + {0x10, "PREFETCH_EN"}, /* PA3-PA0 - PIO "A" timing */ }; static struct pdc_bit_messages pdc_reg_B[] = { /* MB2-MB0 - DMA "B" timing */ - { 0x10, "PIO_FORCED/PB4" }, /* PIO_FORCE 1:0 */ + {0x10, "PIO_FORCED/PB4"}, /* PIO_FORCE 1:0 */ /* PB3-PB0 - PIO "B" timing */ }; static struct pdc_bit_messages pdc_reg_C[] = { - { 0x80, "DMARQp" }, - { 0x40, "IORDYp" }, - { 0x20, "DMAR_EN" }, - { 0x10, "DMAW_EN" }, + {0x80, "DMARQp"}, + {0x40, "IORDYp"}, + {0x20, "DMAR_EN"}, + {0x10, "DMAW_EN"}, /* MC3-MC0 - DMA "C" timing */ }; @@ -106,7 +117,9 @@ printk(KERN_DEBUG " }\n"); } -#endif /* PDC202XX_DECODE_REGISTER_INFO */ +#endif /* PDC202XX_DECODE_REGISTER_INFO */ + +static struct ata_device *drives[4]; int 
check_in_drive_lists(struct ata_device *drive) { @@ -115,12 +128,14 @@ "QUANTUM FIREBALLP KA6.4", "QUANTUM FIREBALLP KA9.1", "QUANTUM FIREBALLP LM20.4", + "QUANTUM FIREBALLP KX13.6", "QUANTUM FIREBALLP KX20.5", "QUANTUM FIREBALLP KX27.3", "QUANTUM FIREBALLP LM20.5", NULL }; - const char**list = pdc_quirk_drives; + + const char **list = pdc_quirk_drives; struct hd_driveid *id = drive->id; while (*list) @@ -133,24 +148,27 @@ { int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA; - switch(ch->pci_dev->device) { - case PCI_DEVICE_ID_PROMISE_20276: - case PCI_DEVICE_ID_PROMISE_20275: - case PCI_DEVICE_ID_PROMISE_20269: - map |= XFER_UDMA_133; - case PCI_DEVICE_ID_PROMISE_20268R: - case PCI_DEVICE_ID_PROMISE_20268: - map &= ~XFER_SWDMA; - case PCI_DEVICE_ID_PROMISE_20267: - case PCI_DEVICE_ID_PROMISE_20265: - map |= XFER_UDMA_100; - case PCI_DEVICE_ID_PROMISE_20262: - map |= XFER_UDMA_66; - - if (!ch->udma_four) { - printk(KERN_WARNING "%s: 40-pin cable, speed reduced to UDMA(33) mode.\n", ch->name); - map &= ~XFER_UDMA_80W; - } + switch (ch->pci_dev->device) { + case PCI_DEVICE_ID_PROMISE_20276: + case PCI_DEVICE_ID_PROMISE_20275: + case PCI_DEVICE_ID_PROMISE_20271: + case PCI_DEVICE_ID_PROMISE_20269: + map |= XFER_UDMA_133; + case PCI_DEVICE_ID_PROMISE_20268R: + case PCI_DEVICE_ID_PROMISE_20268: + map &= ~XFER_SWDMA; + case PCI_DEVICE_ID_PROMISE_20267: + case PCI_DEVICE_ID_PROMISE_20265: + map |= XFER_UDMA_100; + case PCI_DEVICE_ID_PROMISE_20262: + map |= XFER_UDMA_66; + + if (!ch->udma_four) { + printk(KERN_WARNING + "%s: 40-pin cable, speed reduced to UDMA(33) mode.\n", + ch->name); + map &= ~XFER_UDMA_80W; + } } return map; @@ -175,38 +193,89 @@ pci_read_config_byte(dev, drive_pci + 1, &BP); pci_read_config_byte(dev, drive_pci + 2, &CP); - switch(speed) { + switch (speed) { #ifdef CONFIG_BLK_DEV_IDEDMA - case XFER_UDMA_5: - case XFER_UDMA_4: TB = 0x20; TC = 0x01; break; - case XFER_UDMA_3: TB = 0x40; TC = 0x02; break; - case XFER_UDMA_2: TB = 0x20; TC = 0x01; 
break; - case XFER_UDMA_1: TB = 0x40; TC = 0x02; break; - case XFER_UDMA_0: TB = 0x60; TC = 0x03; break; - case XFER_MW_DMA_2: TB = 0x60; TC = 0x03; break; - case XFER_MW_DMA_1: TB = 0x60; TC = 0x04; break; - case XFER_MW_DMA_0: TB = 0x60; TC = 0x05; break; - case XFER_SW_DMA_2: TB = 0x60; TC = 0x05; break; - case XFER_SW_DMA_1: TB = 0x80; TC = 0x06; break; - case XFER_SW_DMA_0: TB = 0xC0; TC = 0x0B; break; -#endif - case XFER_PIO_4: TA = 0x01; TB = 0x04; break; - case XFER_PIO_3: TA = 0x02; TB = 0x06; break; - case XFER_PIO_2: TA = 0x03; TB = 0x08; break; - case XFER_PIO_1: TA = 0x05; TB = 0x0C; break; - case XFER_PIO_0: - default: TA = 0x09; TB = 0x13; break; + case XFER_UDMA_5: + case XFER_UDMA_4: + TB = 0x20; + TC = 0x01; + break; + case XFER_UDMA_3: + TB = 0x40; + TC = 0x02; + break; + case XFER_UDMA_2: + TB = 0x20; + TC = 0x01; + break; + case XFER_UDMA_1: + TB = 0x40; + TC = 0x02; + break; + case XFER_UDMA_0: + TB = 0x60; + TC = 0x03; + break; + case XFER_MW_DMA_2: + TB = 0x60; + TC = 0x03; + break; + case XFER_MW_DMA_1: + TB = 0x60; + TC = 0x04; + break; + case XFER_MW_DMA_0: + TB = 0x60; + TC = 0x05; + break; + case XFER_SW_DMA_2: + TB = 0x60; + TC = 0x05; + break; + case XFER_SW_DMA_1: + TB = 0x80; + TC = 0x06; + break; + case XFER_SW_DMA_0: + TB = 0xC0; + TC = 0x0B; + break; +#endif + case XFER_PIO_4: + TA = 0x01; + TB = 0x04; + break; + case XFER_PIO_3: + TA = 0x02; + TB = 0x06; + break; + case XFER_PIO_2: + TA = 0x03; + TB = 0x08; + break; + case XFER_PIO_1: + TA = 0x05; + TB = 0x0C; + break; + case XFER_PIO_0: + default: + TA = 0x09; + TB = 0x13; + break; } #ifdef CONFIG_BLK_DEV_IDEDMA - if (speed >= XFER_SW_DMA_0) { - pci_write_config_byte(dev, drive_pci + 1, (BP & ~0xf0) | TB); - pci_write_config_byte(dev, drive_pci + 2, (CP & ~0x0f) | TC); + if (speed >= XFER_SW_DMA_0) { + pci_write_config_byte(dev, drive_pci + 1, + (BP & ~0xf0) | TB); + pci_write_config_byte(dev, drive_pci + 2, + (CP & ~0x0f) | TC); } else #endif { pci_write_config_byte(dev, 
drive_pci, (AP & ~0x0f) | TA); - pci_write_config_byte(dev, drive_pci + 1, (BP & ~0x07) | TB); + pci_write_config_byte(dev, drive_pci + 1, + (BP & ~0x07) | TB); } #if PDC202XX_DECODE_REGISTER_INFO @@ -219,7 +288,7 @@ pdc_dump_bits(pdc_reg_A, AP); printk(KERN_DEBUG "BP(%x): DMA(B) = %d PIO(B) = %d\n", - BP, (BP & 0xe0) >> 5, BP & 0x0f); + BP, (BP & 0xe0) >> 5, BP & 0x0f); pdc_dump_bits(pdc_reg_B, BP); printk(KERN_DEBUG "CP(%x): DMA(C) = %d\n", CP, CP & 0x0f); @@ -230,9 +299,8 @@ #if PDC202XX_DEBUG_DRIVE_INFO printk("%s: %02x drive%d 0x%08x ", - drive->name, speed, - drive->dn, drive_conf); - pci_read_config_dword(dev, drive_pci, &drive_conf); + drive->name, speed, drive->dn, drive_conf); + pci_read_config_dword(dev, drive_pci, &drive_conf); printk("0x%08x\n", drive_conf); #endif @@ -250,25 +318,46 @@ static int pdc202xx_new_tune_chipset(struct ata_device *drive, byte speed) { struct ata_channel *hwif = drive->channel; -#ifdef CONFIG_BLK_DEV_IDEDMA - unsigned long indexreg = (hwif->dma_base + 1); - unsigned long datareg = (hwif->dma_base + 3); -#else u32 high_16 = pci_resource_start(hwif->pci_dev, 4); - unsigned long indexreg = high_16 + (hwif->unit ? 0x09 : 0x01); - unsigned long datareg = (indexreg + 2); -#endif /* CONFIG_BLK_DEV_IDEDMA */ - byte thold = 0x10; - byte adj = (drive->dn%2) ? 0x08 : 0x00; + u32 indexreg = high_16 + (hwif->unit ? 0x09 : 0x01); + u32 datareg = indexreg + 2; + + u8 adj = (drive->dn % 2) ? 0x08 : 0x00; + u8 thold = 0x10; + int err, i, j = hwif->unit ? 
2 : 0; #ifdef CONFIG_BLK_DEV_IDEDMA + /* Setting tHOLD bit to 0 if using UDMA mode 2 */ if (speed == XFER_UDMA_2) { OUT_BYTE((thold + adj), indexreg); OUT_BYTE((IN_BYTE(datareg) & 0x7f), datareg); } - switch (speed) { - case XFER_UDMA_7: - speed = XFER_UDMA_6; +#endif + + for (i = 0; i < 2; i++) + if (hwif->drives[i].present) + drives[i + j] = &hwif->drives[i]; + + err = ide_config_drive_speed(drive, speed); + + /* For modes < UDMA mode 6 we need only to SET_FEATURE */ + if (speed < XFER_UDMA_6) + return err; + + /* We need to adjust timings to ATA133 clock if ATA133 drives exist */ + for (i = 0; i < 4; i++) { + if (!drives[i]) + continue; + + /* Primary = 0x01, Secondary = 0x09 */ + indexreg = high_16 + ((i > 1) ? 0x09 : 0x01); + datareg = indexreg + 2; + + /* Master = 0x00, Slave = 0x08 */ + adj = (i % 2) ? 0x08 : 0x00; + + switch (drives[i]->current_speed) { +#ifdef CONFIG_BLK_DEV_IDEDMA case XFER_UDMA_6: set_2regs(0x10, 0x1a); set_2regs(0x11, 0x01); @@ -316,9 +405,7 @@ set_2regs(0x0e, 0xdf); set_2regs(0x0f, 0x5f); break; -#else - switch (speed) { -#endif /* CONFIG_BLK_DEV_IDEDMA */ +#endif case XFER_PIO_4: set_2regs(0x0c, 0x23); set_2regs(0x0d, 0x09); @@ -346,9 +433,10 @@ break; default: ; + } } - return ide_config_drive_speed(drive, speed); + return err; } /* 0 1 2 3 4 5 6 7 8 @@ -364,7 +452,8 @@ if (pio == 255) speed = ata_best_pio_mode(drive); - else speed = XFER_PIO_0 + min_t(byte, pio, 4); + else + speed = XFER_PIO_0 + min_t(byte, pio, 4); pdc202xx_tune_chipset(drive, speed); } @@ -383,7 +472,7 @@ /* IORDY_EN & PREFETCH_EN */ if (id->capability & 4) - set_2regs(0x13, (IN_BYTE(datareg)|0x03)); + set_2regs(0x13, (IN_BYTE(datareg) | 0x03)); return udma_generic_setup(drive, map); } @@ -392,7 +481,7 @@ { struct hd_driveid *id = drive->id; struct ata_channel *hwif = drive->channel; - struct hd_driveid *mate_id = hwif->drives[!(drive->dn%2)].id; + struct hd_driveid *mate_id = hwif->drives[!(drive->dn % 2)].id; struct pci_dev *dev = hwif->pci_dev; u32 high_16 = 
pci_resource_start(dev, 4); u32 drive_conf; @@ -431,30 +520,32 @@ goto chipset_is_set; /* FIXME: what if SYNC_ERRDY is enabled for slave - and disabled for master? --bkz */ + and disabled for master? --bkz */ pci_read_config_byte(dev, drive_pci, &AP); /* enable SYNC_ERRDY for master and slave (if enabled for master) */ if (!(AP & SYNC_ERRDY_EN)) { if (!(drive->dn % 2)) { - pci_write_config_byte(dev, drive_pci, AP|SYNC_ERRDY_EN); + pci_write_config_byte(dev, drive_pci, + AP | SYNC_ERRDY_EN); } else { pci_read_config_byte(dev, drive_pci - 4, &tmp); if (tmp & SYNC_ERRDY_EN) - pci_write_config_byte(dev, drive_pci, AP|SYNC_ERRDY_EN); + pci_write_config_byte(dev, drive_pci, + AP | SYNC_ERRDY_EN); } } -chipset_is_set: + chipset_is_set: if (drive->type != ATA_DISK) return 0; pci_read_config_byte(dev, drive_pci, &AP); - if (id->capability & 4) /* IORDY_EN */ - pci_write_config_byte(dev, drive_pci, AP|IORDY_EN); + if (id->capability & 4) /* IORDY_EN */ + pci_write_config_byte(dev, drive_pci, AP | IORDY_EN); pci_read_config_byte(dev, drive_pci, &AP); if (drive->type == ATA_DISK) /* PREFETCH_EN */ - pci_write_config_byte(dev, drive_pci, AP|PREFETCH_EN); + pci_write_config_byte(dev, drive_pci, AP | PREFETCH_EN); map = hwif->modes_map; @@ -471,20 +562,25 @@ return udma_generic_setup(drive, map); } -static void pdc202xx_udma_start(struct ata_device *drive, struct request *rq) +static void pdc202xx_udma_start(struct ata_device *drive, + struct request *rq) { struct ata_channel *ch = drive->channel; u32 high_16 = pci_resource_start(ch->pci_dev, 4); - unsigned long atapi_reg = high_16 + (ch->unit ? 0x24 : 0x00); + unsigned long atapi_port = high_16 + (ch->unit ? 0x24 : 0x20); + /* Enable ATAPI UDMA port for 48bit data on PDC20265/PDC20267 */ if (drive->addressing) { - unsigned long word_count = 0; - u8 clock = IN_BYTE(high_16 + PDC_CLK); + unsigned long word_count = 0, hankval; + u32 clockreg = high_16 + PDC_CLK; + u8 clock = IN_BYTE(clockreg); - outb(clock|(ch->unit ? 
0x08 : 0x02), high_16 + PDC_CLK); + OUT_BYTE(clock | (ch->unit ? 0x08 : 0x02), clockreg); word_count = (rq->nr_sectors << 8); - word_count = (rq_data_dir(rq) == READ) ? word_count | 0x05000000 : word_count | 0x06000000; - outl(word_count, atapi_reg); + hankval = + (rq_data_dir(rq) == READ) ? 0x05 << 24 : 0x06 << 24; + hankval |= word_count; + outl(hankval, atapi_port); } /* Note that this is done *after* the cmd has been issued to the drive, @@ -492,27 +588,31 @@ * when we do this part before issuing the drive cmd. */ - outb(inb(ch->dma_base) | 1, ch->dma_base); /* start DMA */ + outb(inb(ch->dma_base) | 1, ch->dma_base); /* start DMA */ } static int pdc202xx_udma_stop(struct ata_device *drive) { struct ata_channel *ch = drive->channel; u32 high_16 = pci_resource_start(ch->pci_dev, 4); - unsigned long atapi_reg = high_16 + (ch->unit ? 0x24 : 0x00); + unsigned long atapi_port = high_16 + (ch->unit ? 0x24 : 0x20); unsigned long dma_base = ch->dma_base; - u8 dma_stat, clock; + u8 dma_stat; + /* Disable ATAPI UDMA port for 48bit data on PDC20265/PDC20267 */ if (drive->addressing) { - outl(0, atapi_reg); /* zero out extra */ - clock = IN_BYTE(high_16 + PDC_CLK); - OUT_BYTE(clock & ~(ch->unit ? 0x08:0x02), high_16 + PDC_CLK); + u32 clockreg = high_16 + PDC_CLK; + u8 clock; + + outl(0, atapi_port); /* zero out extra */ + clock = IN_BYTE(clockreg); + OUT_BYTE(clock & ~(ch->unit ? 0x08 : 0x02), clockreg); } - outb(inb(dma_base)&~1, dma_base); /* stop DMA */ - dma_stat = inb(dma_base+2); /* get DMA status */ - outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */ - udma_destroy_table(ch); /* purge DMA mappings */ + outb(inb(dma_base) & ~1, dma_base); /* stop DMA */ + dma_stat = inb(dma_base + 2); /* get DMA status */ + outb(dma_stat | 6, dma_base + 2); /* clear the INTR & ERROR bits */ + udma_destroy_table(ch); /* purge DMA mappings */ return (dma_stat & 7) != 4 ? 
(0x10 | dma_stat) : 0; /* verify good DMA status */ } @@ -527,21 +627,21 @@ #endif -static void pdc202xx_new_reset(struct ata_device *drive) +/* FIXME: use generic ata_reset() --bzolnier */ +static void pdc202xx_reset(struct ata_device *drive) { - ata_reset(drive->channel); - mdelay(1000); - ata_irq_enable(drive, 1); - mdelay(1000); + outb(0x04, drive->channel->io_ports[IDE_CONTROL_OFFSET]); + udelay(10); + outb(0x00, drive->channel->io_ports[IDE_CONTROL_OFFSET]); printk(KERN_INFO "PDC202XX: %s channel reset.\n", - drive->channel->unit ? "Secondary" : "Primary"); + drive->channel->unit ? "Secondary" : "Primary"); } /* * software host reset * * BIOS will set UDMA timing on if the drive supports it. - * The user may then want to turn it off. A bug is that + * The user may then want to turn it off. A bug is * that device cannot handle a downgrade in timing from * UDMA to DMA. Disk accesses after issuing a set * feature command will result in errors. @@ -549,6 +649,7 @@ * A software reset leaves the timing registers intact, * but resets the drives on both channels. */ +#if 0 static void pdc202xx_reset_host(struct pci_dev *dev) { u32 high_16 = pci_resource_start(dev, 4); @@ -566,62 +667,79 @@ printk(KERN_INFO "%s: channel needs reset.\n", ch->name); pdc202xx_reset_host(ch->pci_dev); } +#endif static unsigned int __init pdc202xx_init_chipset(struct pci_dev *dev) { u32 high_16 = pci_resource_start(dev, 4); - u8 burst = IN_BYTE(high_16 + PDC_UDMA); + u32 burstreg = high_16 + PDC_UDMA; + u8 burst = IN_BYTE(burstreg); + + set_reg_and_wait(burst | 0x10, burstreg, 100); + /* FIXME: 2 seconds ?! 
*/ + set_reg_and_wait(burst & ~0x10, burstreg, 2000); if (dev->resource[PCI_ROM_RESOURCE].start) { - pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); - printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", dev->name, dev->resource[PCI_ROM_RESOURCE].start); + pci_write_config_dword(dev, PCI_ROM_ADDRESS, + dev->resource[PCI_ROM_RESOURCE]. + start | PCI_ROM_ADDRESS_ENABLE); + printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", dev->name, + dev->resource[PCI_ROM_RESOURCE].start); } - +#if 0 switch (dev->device) { - case PCI_DEVICE_ID_PROMISE_20267: - case PCI_DEVICE_ID_PROMISE_20265: - case PCI_DEVICE_ID_PROMISE_20262: - pdc202xx_reset_host(dev); - break; - default: - /* FIXME: only checked for 20246 - is this right?, - if it is needed it should go to ide-pci --bkz */ - if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) { - byte irq = 0, irq2 = 0; - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); - pci_read_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, &irq2); /* 0xbc */ - if (irq != irq2) { - pci_write_config_byte(dev, (PCI_INTERRUPT_LINE)|0x80, irq); /* 0xbc */ - printk("%s: pci-config space interrupt mirror fixed.\n", dev->name); - } + case PCI_DEVICE_ID_PROMISE_20267: + case PCI_DEVICE_ID_PROMISE_20265: + case PCI_DEVICE_ID_PROMISE_20262: + pdc202xx_reset_host(dev); + break; + default: + if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) { + byte irq = 0, irq2 = 0; + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, + &irq); + pci_read_config_byte(dev, (PCI_INTERRUPT_LINE) | 0x80, &irq2); /* 0xbc */ + if (irq != irq2) { + pci_write_config_byte(dev, (PCI_INTERRUPT_LINE) | 0x80, irq); /* 0xbc */ + printk + ("%s: pci-config space interrupt mirror fixed.\n", + dev->name); } - break; + } + break; } +#endif #ifdef CONFIG_PDC202XX_BURST if (!(burst & 1)) { printk(KERN_INFO "%s: forcing (U)DMA BURST.\n", dev->name); - OUT_BYTE(burst | 1, high_16 + PDC_UDMA); + OUT_BYTE(burst | 1, burstreg); + burst = IN_BYTE(burstreg); } #endif 
printk(KERN_INFO "%s: (U)DMA BURST %sabled, " - "primary %s mode, secondary %s mode.\n", + "primary %s mode, secondary %s mode.\n", dev->name, (burst & 1) ? "en" : "dis", (IN_BYTE(high_16 + PDC_PRIMARY) & 1) ? "MASTER" : "PCI", - (IN_BYTE(high_16 + PDC_SECONDARY) & 1) ? "MASTER" : "PCI" ); + (IN_BYTE(high_16 + PDC_SECONDARY) & 1) ? "MASTER" : "PCI"); return dev->irq; } +#if 0 /* chipsets newer then 20267 */ static unsigned int __init pdc202xx_tx_init_chipset(struct pci_dev *dev) { if (dev->resource[PCI_ROM_RESOURCE].start) { - pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); - printk(KERN_INFO "%s: ROM enabled at 0x%08lx.\n", dev->name, dev->resource[PCI_ROM_RESOURCE].start); + pci_write_config_dword(dev, PCI_ROM_ADDRESS, + dev->resource[PCI_ROM_RESOURCE]. + start | PCI_ROM_ADDRESS_ENABLE); + printk(KERN_INFO "%s: ROM enabled at 0x%08lx.\n", + dev->name, dev->resource[PCI_ROM_RESOURCE].start); } return dev->irq; } +#endif static unsigned int __init pdc202xx_ata66_check(struct ata_channel *ch) { @@ -640,46 +758,46 @@ static void __init ide_init_pdc202xx(struct ata_channel *hwif) { - hwif->tuneproc = &pdc202xx_tune_drive; + hwif->tuneproc = &pdc202xx_tune_drive; hwif->quirkproc = &check_in_drive_lists; + hwif->resetproc = &pdc202xx_reset; - switch(hwif->pci_dev->device) { - case PCI_DEVICE_ID_PROMISE_20275: - case PCI_DEVICE_ID_PROMISE_20276: - case PCI_DEVICE_ID_PROMISE_20269: - case PCI_DEVICE_ID_PROMISE_20268: - case PCI_DEVICE_ID_PROMISE_20268R: - hwif->udma_four = pdc202xx_tx_ata66_check(hwif); + switch (hwif->pci_dev->device) { + case PCI_DEVICE_ID_PROMISE_20276: + case PCI_DEVICE_ID_PROMISE_20275: + case PCI_DEVICE_ID_PROMISE_20271: + case PCI_DEVICE_ID_PROMISE_20269: + case PCI_DEVICE_ID_PROMISE_20268: + case PCI_DEVICE_ID_PROMISE_20268R: + hwif->udma_four = pdc202xx_tx_ata66_check(hwif); - hwif->speedproc = &pdc202xx_new_tune_chipset; - hwif->resetproc = &pdc202xx_new_reset; + hwif->speedproc = 
&pdc202xx_new_tune_chipset; #ifdef CONFIG_BLK_DEV_IDEDMA - if (hwif->dma_base) - hwif->udma_setup = pdc202xx_tx_udma_setup; + if (hwif->dma_base) + hwif->udma_setup = pdc202xx_tx_udma_setup; #endif - break; - case PCI_DEVICE_ID_PROMISE_20267: - case PCI_DEVICE_ID_PROMISE_20265: - case PCI_DEVICE_ID_PROMISE_20262: - hwif->udma_four = pdc202xx_ata66_check(hwif); - - hwif->resetproc = &pdc202xx_reset; + break; + case PCI_DEVICE_ID_PROMISE_20267: + case PCI_DEVICE_ID_PROMISE_20265: #ifdef CONFIG_BLK_DEV_IDEDMA - /* we need special functions for lba48 */ - if (hwif->dma_base) { - hwif->udma_start = pdc202xx_udma_start; - hwif->udma_stop = pdc202xx_udma_stop; - } + /* we need special functions for lba48 */ + if (hwif->dma_base) { + hwif->udma_start = pdc202xx_udma_start; + hwif->udma_stop = pdc202xx_udma_stop; + } #endif - /* FIXME: check whether 20246 works with lba48 --bkz */ - case PCI_DEVICE_ID_PROMISE_20246: + /* PDC20262 doesn't support LBA48 */ + case PCI_DEVICE_ID_PROMISE_20262: + hwif->udma_four = pdc202xx_ata66_check(hwif); + + case PCI_DEVICE_ID_PROMISE_20246: #ifdef CONFIG_BLK_DEV_IDEDMA - if (hwif->dma_base) - hwif->udma_setup = pdc202xx_udma_setup; + if (hwif->dma_base) + hwif->udma_setup = pdc202xx_udma_setup; #endif - hwif->speedproc = &pdc202xx_tune_chipset; - default: - break; + hwif->speedproc = &pdc202xx_tune_chipset; + default: + break; } #ifdef CONFIG_BLK_DEV_IDEDMA @@ -700,99 +818,97 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20246, - init_chipset: pdc202xx_init_chipset, - init_channel: ide_init_pdc202xx, + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20246, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, #ifndef CONFIG_PDC202XX_FORCE - enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, + .enablebits = {{0x50, 0x02, 0x02}, {0x50, 0x04, 0x04}}, #endif - bootable: OFF_BOARD, - extra: 16, - flags: 
ATA_F_IRQ | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20262, - init_chipset: pdc202xx_init_chipset, - init_channel: ide_init_pdc202xx, + .bootable = OFF_BOARD, + .extra = 16, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20262, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, #ifndef CONFIG_PDC202XX_FORCE - enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, + .enablebits = {{0x50, 0x02, 0x02}, {0x50, 0x04, 0x04}}, #endif - bootable: OFF_BOARD, - extra: 48, - flags: ATA_F_IRQ | ATA_F_PHACK | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20265, - init_chipset: pdc202xx_init_chipset, - init_channel: ide_init_pdc202xx, + .bootable = OFF_BOARD, + .extra = 48, + .flags = ATA_F_IRQ | ATA_F_PHACK | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20265, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, #ifndef CONFIG_PDC202XX_FORCE - enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, - bootable: OFF_BOARD, + .enablebits = {{0x50, 0x02, 0x02}, {0x50, 0x04, 0x04}}, + .bootable = OFF_BOARD, #else - bootable: ON_BOARD, + .bootable = ON_BOARD, #endif - extra: 48, - flags: ATA_F_IRQ | ATA_F_PHACK | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20267, - init_chipset: pdc202xx_init_chipset, - init_channel: ide_init_pdc202xx, + .extra = 48, + .flags = ATA_F_IRQ | ATA_F_PHACK | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20267, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, #ifndef CONFIG_PDC202XX_FORCE - enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, + .enablebits = {{0x50, 0x02, 0x02}, {0x50, 0x04, 0x04}}, #endif - bootable: OFF_BOARD, - extra: 48, - flags: ATA_F_IRQ | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: 
PCI_DEVICE_ID_PROMISE_20268, - init_chipset: pdc202xx_tx_init_chipset, - init_channel: ide_init_pdc202xx, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, + .bootable = OFF_BOARD, + .extra = 48, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20268, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, /* Promise used a different PCI identification for the raid card * apparently to try and prevent Linux detecting it and using our own * raid code. We want to detect it for the ataraid drivers, so we have * to list both here.. */ { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20268R, - init_chipset: pdc202xx_tx_init_chipset, - init_channel: ide_init_pdc202xx, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20269, - init_chipset: pdc202xx_tx_init_chipset, - init_channel: ide_init_pdc202xx, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20275, - init_chipset: pdc202xx_tx_init_chipset, - init_channel: ide_init_pdc202xx, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, - { - vendor: PCI_VENDOR_ID_PROMISE, - device: PCI_DEVICE_ID_PROMISE_20276, - init_chipset: pdc202xx_tx_init_chipset, - init_channel: ide_init_pdc202xx, - bootable: OFF_BOARD, - flags: ATA_F_IRQ | ATA_F_DMA - }, + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20268R, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20269, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = 
PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20271, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20275, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, + { + .vendor = PCI_VENDOR_ID_PROMISE, + .device = PCI_DEVICE_ID_PROMISE_20276, + .init_chipset = pdc202xx_init_chipset, + .init_channel = ide_init_pdc202xx, + .bootable = OFF_BOARD, + .flags = ATA_F_IRQ | ATA_F_DMA}, }; int __init init_pdc202xx(void) @@ -802,5 +918,5 @@ for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - return 0; + return 0; } diff -Nru a/drivers/ide/pdc4030.c b/drivers/ide/pdc4030.c --- a/drivers/ide/pdc4030.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/pdc4030.c Sat Jul 20 12:12:35 2002 @@ -106,26 +106,24 @@ { unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); inb(IDE_NSECTOR_REG); inb(IDE_NSECTOR_REG); inb(IDE_NSECTOR_REG); insl(IDE_DATA_REG, buffer, wcount); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); } static void write_vlb(struct ata_device *drive, void *buffer, unsigned int wcount) { unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); inb(IDE_NSECTOR_REG); inb(IDE_NSECTOR_REG); inb(IDE_NSECTOR_REG); outsl(IDE_DATA_REG, buffer, wcount); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); } static void read_16(struct ata_device *drive, void *buffer, unsigned int wcount) @@ -415,7 +413,7 @@ rq->nr_sectors -= nsect; total_remaining = rq->nr_sectors; if ((rq->current_nr_sectors -= nsect) <= 0) - __ata_end_request(drive, rq, 1, 0); + ata_end_request(drive, rq, 1, 0); /* * Now the data has been read in, do the 
following: @@ -477,7 +475,7 @@ #ifdef DEBUG_WRITE printk(KERN_DEBUG "%s: Write complete - end_request\n", drive->name); #endif - __ata_end_request(drive, rq, 1, rq->nr_sectors); + ata_end_request(drive, rq, 1, rq->nr_sectors); return ATA_OP_FINISHED; } @@ -629,7 +627,7 @@ /* Check that it's a regular command. If not, bomb out early. */ if (!(rq->flags & REQ_CMD)) { blk_dump_rq_flags(rq, "pdc4030 bad flags"); - __ata_end_request(drive, rq, 0, 0); + ata_end_request(drive, rq, 0, 0); return ATA_OP_FINISHED; } @@ -701,7 +699,7 @@ return ret; } if (!drive->channel->unmask) - __cli(); /* local CPU only */ + local_irq_disable(); return promise_do_write(drive, rq); } @@ -709,7 +707,7 @@ default: printk(KERN_ERR "pdc4030: command not READ or WRITE! Huh?\n"); - __ata_end_request(drive, rq, 0, 0); + ata_end_request(drive, rq, 0, 0); return ATA_OP_FINISHED; } } diff -Nru a/drivers/ide/pdcraid.c b/drivers/ide/pdcraid.c --- a/drivers/ide/pdcraid.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/pdcraid.c Sat Jul 20 12:12:35 2002 @@ -12,7 +12,7 @@ Authors: Arjan van de Ven - Based on work done by Søren Schmidt for FreeBSD + Based on work done by Søren Schmidt for FreeBSD */ @@ -31,11 +31,14 @@ #include "ataraid.h" -static int pdcraid_open(struct inode * inode, struct file * filp); -static int pdcraid_release(struct inode * inode, struct file * filp); -static int pdcraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -static int pdcraid0_make_request (request_queue_t *q, int rw, struct buffer_head * bh); -static int pdcraid1_make_request (request_queue_t *q, int rw, struct buffer_head * bh); +static int pdcraid_open(struct inode *inode, struct file *filp); +static int pdcraid_release(struct inode *inode, struct file *filp); +static int pdcraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); +static int pdcraid0_make_request(request_queue_t * q, int rw, + struct buffer_head *bh); +static int 
pdcraid1_make_request(request_queue_t * q, int rw, + struct buffer_head *bh); struct disk_dev { int major; @@ -43,26 +46,26 @@ int device; }; -static struct disk_dev devlist[]= { - {IDE0_MAJOR, 0, -1 }, - {IDE0_MAJOR, 64, -1 }, - {IDE1_MAJOR, 0, -1 }, - {IDE1_MAJOR, 64, -1 }, - {IDE2_MAJOR, 0, -1 }, - {IDE2_MAJOR, 64, -1 }, - {IDE3_MAJOR, 0, -1 }, - {IDE3_MAJOR, 64, -1 }, - {IDE4_MAJOR, 0, -1 }, - {IDE4_MAJOR, 64, -1 }, - {IDE5_MAJOR, 0, -1 }, - {IDE5_MAJOR, 64, -1 }, - {IDE6_MAJOR, 0, -1 }, - {IDE6_MAJOR, 64, -1 }, +static struct disk_dev devlist[] = { + {IDE0_MAJOR, 0, -1}, + {IDE0_MAJOR, 64, -1}, + {IDE1_MAJOR, 0, -1}, + {IDE1_MAJOR, 64, -1}, + {IDE2_MAJOR, 0, -1}, + {IDE2_MAJOR, 64, -1}, + {IDE3_MAJOR, 0, -1}, + {IDE3_MAJOR, 64, -1}, + {IDE4_MAJOR, 0, -1}, + {IDE4_MAJOR, 64, -1}, + {IDE5_MAJOR, 0, -1}, + {IDE5_MAJOR, 64, -1}, + {IDE6_MAJOR, 0, -1}, + {IDE6_MAJOR, 64, -1}, }; struct pdcdisk { - kdev_t device; + kdev_t device; unsigned long sectors; struct block_device *bdev; unsigned long last_pos; @@ -73,133 +76,163 @@ unsigned int disks; unsigned long sectors; struct geom geom; - + struct pdcdisk disk[8]; - + unsigned long cutoff[8]; unsigned int cutoff_disks[8]; }; static struct raid_device_operations pdcraid0_ops = { - open: pdcraid_open, - release: pdcraid_release, - ioctl: pdcraid_ioctl, - make_request: pdcraid0_make_request + .open = pdcraid_open, + .release = pdcraid_release, + .ioctl = pdcraid_ioctl, + .make_request = pdcraid0_make_request }; static struct raid_device_operations pdcraid1_ops = { - open: pdcraid_open, - release: pdcraid_release, - ioctl: pdcraid_ioctl, - make_request: pdcraid1_make_request + .open = pdcraid_open, + .release = pdcraid_release, + .ioctl = pdcraid_ioctl, + .make_request = pdcraid1_make_request }; static struct pdcraid raid[16]; -static int pdcraid_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static int pdcraid_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned 
long arg) { unsigned int minor; - unsigned long sectors; + unsigned long sectors; - if (!inode || kdev_none(inode->i_rdev)) + if (!inode || kdev_none(inode->i_rdev)) return -EINVAL; - minor = minor(inode->i_rdev)>>SHIFT; - + minor = minor(inode->i_rdev) >> SHIFT; + switch (cmd) { - case BLKGETSIZE: /* Return device size */ - if (!arg) return -EINVAL; - sectors = ataraid_gendisk.part[minor(inode->i_rdev)].nr_sects; - if (minor(inode->i_rdev)&15) - return put_user(sectors, (unsigned long *) arg); - return put_user(raid[minor].sectors , (unsigned long *) arg); - break; - + case BLKGETSIZE: /* Return device size */ + if (!arg) + return -EINVAL; + sectors = + ataraid_gendisk.part[minor(inode->i_rdev)].nr_sects; + if (minor(inode->i_rdev) & 15) + return put_user(sectors, (unsigned long *) arg); + return put_user(raid[minor].sectors, + (unsigned long *) arg); + break; + - case HDIO_GETGEO: + case HDIO_GETGEO: { - struct hd_geometry *loc = (struct hd_geometry *) arg; - unsigned short bios_cyl = raid[minor].geom.cylinders; /* truncate */ - - if (!loc) return -EINVAL; - if (put_user(raid[minor].geom.heads, (byte *) &loc->heads)) return -EFAULT; - if (put_user(raid[minor].geom.sectors, (byte *) &loc->sectors)) return -EFAULT; - if (put_user(bios_cyl, (unsigned short *) &loc->cylinders)) return -EFAULT; - if (put_user((unsigned)ataraid_gendisk.part[minor(inode->i_rdev)].start_sect, - (unsigned long *) &loc->start)) return -EFAULT; + struct hd_geometry *loc = + (struct hd_geometry *) arg; + unsigned short bios_cyl = raid[minor].geom.cylinders; /* truncate */ + + if (!loc) + return -EINVAL; + if (put_user + (raid[minor].geom.heads, + (byte *) & loc->heads)) + return -EFAULT; + if (put_user + (raid[minor].geom.sectors, + (byte *) & loc->sectors)) + return -EFAULT; + if (put_user + (bios_cyl, (unsigned short *) &loc->cylinders)) + return -EFAULT; + if (put_user + ((unsigned) ataraid_gendisk. 
+ part[minor(inode->i_rdev)].start_sect, + (unsigned long *) &loc->start)) + return -EFAULT; return 0; } - case BLKROSET: - case BLKROGET: - case BLKSSZGET: - return blk_ioctl(inode->i_bdev, cmd, arg); + case BLKROSET: + case BLKROGET: + case BLKSSZGET: + return blk_ioctl(inode->i_bdev, cmd, arg); - default: - printk("Invalid ioctl \n"); - return -EINVAL; + default: + printk("Invalid ioctl \n"); + return -EINVAL; }; return 0; } -unsigned long partition_map_normal(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) +unsigned long partition_map_normal(unsigned long block, + unsigned long partition_off, + unsigned long partition_size, + int stride) { return block + partition_off; } -unsigned long partition_map_linux(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) +unsigned long partition_map_linux(unsigned long block, + unsigned long partition_off, + unsigned long partition_size, int stride) { unsigned long newblock; - - newblock = stride - (partition_off%stride); if (newblock == stride) newblock = 0; + + newblock = stride - (partition_off % stride); + if (newblock == stride) + newblock = 0; newblock += block; newblock = newblock % partition_size; newblock += partition_off; - + return newblock; } -static int funky_remap[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; +static int funky_remap[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; -unsigned long partition_map_linux_raid0_4disk(unsigned long block, unsigned long partition_off, unsigned long partition_size, int stride) +unsigned long partition_map_linux_raid0_4disk(unsigned long block, + unsigned long partition_off, + unsigned long partition_size, + int stride) { - unsigned long newblock,temp,temp2; - - newblock = stride - (partition_off%stride); if (newblock == stride) newblock = 0; + unsigned long newblock, temp, temp2; + + newblock = stride - (partition_off % stride); + if (newblock == stride) + newblock = 0; - if (block < (partition_size / 
(8*stride))*8*stride ) { + if (block < (partition_size / (8 * stride)) * 8 * stride) { temp = block % stride; temp2 = block / stride; - temp2 = ((temp2>>3)<<3)|(funky_remap[temp2&7]); - block = temp2*stride+temp; + temp2 = ((temp2 >> 3) << 3) | (funky_remap[temp2 & 7]); + block = temp2 * stride + temp; } - + newblock += block; newblock = newblock % partition_size; newblock += partition_off; - + return newblock; } -static int pdcraid0_make_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int pdcraid0_make_request(request_queue_t * q, int rw, + struct buffer_head *bh) { unsigned long rsect; - unsigned long rsect_left,rsect_accum = 0; + unsigned long rsect_left, rsect_accum = 0; unsigned long block; - unsigned int disk=0,real_disk=0; + unsigned int disk = 0, real_disk = 0; int i; int device; struct pdcraid *thisraid; rsect = bh->b_rsector; - + /* Ok. We need to modify this sector number to a new disk + new sector number. * If there are disks of different sizes, this gets tricky. * Example with 3 disks (1Gb, 4Gb and 5 GB): @@ -211,52 +244,66 @@ * a disk falls out of the "higher" count, we mark the max sector. So once we pass a cutoff * point, we have to divide by one less. */ - - device = (bh->b_rdev >> SHIFT)&MAJOR_MASK; + + device = (bh->b_rdev >> SHIFT) & MAJOR_MASK; thisraid = &raid[device]; - if (thisraid->stride==0) - thisraid->stride=1; + if (thisraid->stride == 0) + thisraid->stride = 1; /* Partitions need adding of the start sector of the partition to the requested sector */ - - rsect = partition_map_normal(rsect, ataraid_gendisk.part[MINOR(bh->b_rdev)].start_sect, ataraid_gendisk.part[MINOR(bh->b_rdev)].nr_sects, thisraid->stride); + + rsect = + partition_map_normal(rsect, + ataraid_gendisk.part[MINOR(bh->b_rdev)]. + start_sect, + ataraid_gendisk.part[MINOR(bh->b_rdev)]. 
+ nr_sects, thisraid->stride); /* Woops we need to split the request to avoid crossing a stride barrier */ - if ((rsect/thisraid->stride) != ((rsect+(bh->b_size/512)-1)/thisraid->stride)) { - return -1; + if ((rsect / thisraid->stride) != + ((rsect + (bh->b_size / 512) - 1) / thisraid->stride)) { + return -1; } - + rsect_left = rsect; - - for (i=0;i<8;i++) { - if (thisraid->cutoff_disks[i]==0) + + for (i = 0; i < 8; i++) { + if (thisraid->cutoff_disks[i] == 0) break; if (rsect > thisraid->cutoff[i]) { /* we're in the wrong area so far */ rsect_left -= thisraid->cutoff[i]; - rsect_accum += thisraid->cutoff[i]/thisraid->cutoff_disks[i]; + rsect_accum += + thisraid->cutoff[i] / + thisraid->cutoff_disks[i]; } else { block = rsect_left / thisraid->stride; disk = block % thisraid->cutoff_disks[i]; - block = (block / thisraid->cutoff_disks[i]) * thisraid->stride; - rsect = rsect_accum + (rsect_left % thisraid->stride) + block; + block = + (block / thisraid->cutoff_disks[i]) * + thisraid->stride; + rsect = + rsect_accum + (rsect_left % thisraid->stride) + + block; break; } } - - for (i=0;i<8;i++) { - if ((disk==0) && (thisraid->disk[i].sectors > rsect_accum)) { + + for (i = 0; i < 8; i++) { + if ((disk == 0) + && (thisraid->disk[i].sectors > rsect_accum)) { real_disk = i; break; } - if ((disk>0) && (thisraid->disk[i].sectors >= rsect_accum)) { + if ((disk > 0) + && (thisraid->disk[i].sectors >= rsect_accum)) { disk--; } - + } disk = real_disk; - - + + /* * The new BH_Lock semantics in ll_rw_blk.c guarantee that this * is the only IO operation happening on this bh. 
@@ -270,106 +317,112 @@ return 1; } -static int pdcraid1_write_request(request_queue_t *q, int rw, struct buffer_head * bh) +static int pdcraid1_write_request(request_queue_t * q, int rw, + struct buffer_head *bh) { struct buffer_head *bh1; struct ataraid_bh_private *private; int device; int i; - device = (bh->b_rdev >> SHIFT)&MAJOR_MASK; + device = (bh->b_rdev >> SHIFT) & MAJOR_MASK; private = ataraid_get_private(); - if (private==NULL) + if (private == NULL) BUG(); private->parent = bh; - - atomic_set(&private->count,raid[device].disks); + + atomic_set(&private->count, raid[device].disks); - for (i = 0; i< raid[device].disks; i++) { - bh1=ataraid_get_bhead(); + for (i = 0; i < raid[device].disks; i++) { + bh1 = ataraid_get_bhead(); /* If this ever fails we're doomed */ if (!bh1) BUG(); - + /* dupe the bufferhead and update the parts that need to be different */ memcpy(bh1, bh, sizeof(*bh)); - + bh1->b_end_io = ataraid_end_request; bh1->b_private = private; - bh1->b_rsector += ataraid_gendisk.part[MINOR(bh->b_rdev)].start_sect; /* partition offset */ + bh1->b_rsector += ataraid_gendisk.part[MINOR(bh->b_rdev)].start_sect; /* partition offset */ bh1->b_rdev = raid[device].disk[i].device; /* update the last known head position for the drive */ - raid[device].disk[i].last_pos = bh1->b_rsector+(bh1->b_size>>9); + raid[device].disk[i].last_pos = + bh1->b_rsector + (bh1->b_size >> 9); - generic_make_request(rw,bh1); + generic_make_request(rw, bh1); } return 0; } -static int pdcraid1_read_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int pdcraid1_read_request(request_queue_t * q, int rw, + struct buffer_head *bh) { int device; int dist; - int bestsofar,bestdist,i; + int bestsofar, bestdist, i; static int previous; /* Reads are simple in principle. Pick a disk and go. Initially I cheat by just picking the one which the last known head position is closest by. 
Later on, online/offline checking and performance needs adding */ - - device = (bh->b_rdev >> SHIFT)&MAJOR_MASK; - bh->b_rsector += ataraid_gendisk.part[MINOR(bh->b_rdev)].start_sect; - bestsofar = 0; + device = (bh->b_rdev >> SHIFT) & MAJOR_MASK; + bh->b_rsector += + ataraid_gendisk.part[MINOR(bh->b_rdev)].start_sect; + + bestsofar = 0; bestdist = raid[device].disk[0].last_pos - bh->b_rsector; - if (bestdist<0) - bestdist=-bestdist; - if (bestdist>4095) - bestdist=4095; + if (bestdist < 0) + bestdist = -bestdist; + if (bestdist > 4095) + bestdist = 4095; - for (i=1 ; ib_rsector; - if (dist<0) + if (dist < 0) dist = -dist; - if (dist>4095) - dist=4095; - - if (bestdist==dist) { /* it's a tie; try to do some read balancing */ - if ((previous>bestsofar)&&(previous<=i)) + if (dist > 4095) + dist = 4095; + + if (bestdist == dist) { /* it's a tie; try to do some read balancing */ + if ((previous > bestsofar) && (previous <= i)) bestsofar = i; previous = (previous + 1) % raid[device].disks; - } else if (bestdist>dist) { + } else if (bestdist > dist) { bestdist = dist; bestsofar = i; } - + } - - bh->b_rdev = raid[device].disk[bestsofar].device; - raid[device].disk[bestsofar].last_pos = bh->b_rsector+(bh->b_size>>9); + + bh->b_rdev = raid[device].disk[bestsofar].device; + raid[device].disk[bestsofar].last_pos = + bh->b_rsector + (bh->b_size >> 9); /* * Let the main block layer submit the IO and resolve recursion: */ - + return 1; } -static int pdcraid1_make_request (request_queue_t *q, int rw, struct buffer_head * bh) +static int pdcraid1_make_request(request_queue_t * q, int rw, + struct buffer_head *bh) { /* Read and Write are totally different cases; split them totally here */ - if (rw==READA) + if (rw == READA) rw = READ; - - if (rw==READ) - return pdcraid1_read_request(q,rw,bh); + + if (rw == READ) + return pdcraid1_read_request(q, rw, bh); else - return pdcraid1_write_request(q,rw,bh); + return pdcraid1_write_request(q, rw, bh); } #include "pdcraid.h" @@ -379,23 
+432,24 @@ unsigned long lba = 0; struct ata_device *ideinfo = get_info_ptr(to_kdev_t(bdev->bd_dev)); - if (ideinfo==NULL) + if (ideinfo == NULL) return 0; /* first sector of the last cluster */ - if (ideinfo->head==0) + if (ideinfo->head == 0) return 0; - if (ideinfo->sect==0) + if (ideinfo->sect == 0) return 0; - lba = (ideinfo->capacity / (ideinfo->head*ideinfo->sect)); - lba = lba * (ideinfo->head*ideinfo->sect); + lba = (ideinfo->capacity / (ideinfo->head * ideinfo->sect)); + lba = lba * (ideinfo->head * ideinfo->sect); lba = lba - ideinfo->sect; return lba; } -static int read_disk_sb(struct block_device *bdev, struct promise_raid_conf *p) +static int read_disk_sb(struct block_device *bdev, + struct promise_raid_conf *p) { unsigned long sb_offset; char *buffer; @@ -407,14 +461,15 @@ */ sb_offset = calc_pdcblock_offset(bdev); - if (sb_offset==0) - return -1; + if (sb_offset == 0) + return -1; - for (i = 0, buffer = (char*)p; i < 4; i++, buffer += 512) { + for (i = 0, buffer = (char *) p; i < 4; i++, buffer += 512) { Sector sect; char *q = read_dev_sector(bdev, sb_offset + i, §); if (!p) { - printk(KERN_ERR "pdcraid: Error reading superblock.\n"); + printk(KERN_ERR + "pdcraid: Error reading superblock.\n"); return -1; } memcpy(buffer, q, 512); @@ -423,127 +478,133 @@ return 0; } -static unsigned int calc_sb_csum (unsigned int* ptr) -{ +static unsigned int calc_sb_csum(unsigned int *ptr) +{ unsigned int sum; int count; - + sum = 0; - for (count=0;count<511;count++) + for (count = 0; count < 511; count++) sum += *ptr++; - + return sum; } static int cookie = 0; static struct promise_raid_conf __initdata prom; -static void __init probedisk(int devindex,int device, int raidlevel) +static void __init probedisk(int devindex, int device, int raidlevel) { int i; int major, minor; struct block_device *bdev; - if (devlist[devindex].device!=-1) /* already assigned to another array */ + if (devlist[devindex].device != -1) /* already assigned to another array */ return; - + 
major = devlist[devindex].major; - minor = devlist[devindex].minor; + minor = devlist[devindex].minor; - bdev = bdget(mk_kdev(major,minor)); + bdev = bdget(mk_kdev(major, minor)); if (!bdev) return; - if (blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_RAW) != 0) + if (blkdev_get(bdev, FMODE_READ | FMODE_WRITE, 0, BDEV_RAW) != 0) return; - if (read_disk_sb(bdev, &prom)) - goto out; + if (read_disk_sb(bdev, &prom)) + goto out; - /* the checksums must match */ - if (prom.checksum != calc_sb_csum((unsigned int*)&prom)) + /* the checksums must match */ + if (prom.checksum != calc_sb_csum((unsigned int *) &prom)) goto out; - if (prom.raid.type!=raidlevel) /* different raidlevel */ + if (prom.raid.type != raidlevel) /* different raidlevel */ goto out; - if ((cookie!=0) && (cookie != prom.raid.magic_1)) /* different array */ + if ((cookie != 0) && (cookie != prom.raid.magic_1)) /* different array */ goto out; - + cookie = prom.raid.magic_1; /* This looks evil. But basically, we have to search for our adapternumber - in the arraydefinition, both of which are in the superblock */ - for (i=0;(ibar)) + for (j = 0; j < 8; j++) + if ((raid[device].disk[j].sectors < smallest) + && (raid[device].disk[j].sectors > bar)) smallest = raid[device].disk[j].sectors; count = 0; - for (j=0;j<8;j++) + for (j = 0; j < 8; j++) if (raid[device].disk[j].sectors >= smallest) count++; - + smallest = smallest * count; bar = smallest; raid[device].cutoff[i] = smallest; raid[device].cutoff_disks[i] = count; } } - -static __init int pdcraid_init_one(int device,int raidlevel) + +static __init int pdcraid_init_one(int device, int raidlevel) { int i, count; - for (i=0; i<14; i++) + for (i = 0; i < 14; i++) probedisk(i, device, raidlevel); - - if (raidlevel==0) + + if (raidlevel == 0) fill_cutoff(device); - + /* Initialize the gendisk structure */ - - ataraid_register_disk(device,raid[device].sectors); - - count=0; - - for (i=0;i<8;i++) { - if (raid[device].disk[i].device!=0) { + + 
ataraid_register_disk(device, raid[device].sectors); + + count = 0; + + for (i = 0; i < 8; i++) { + if (raid[device].disk[i].device != 0) { printk(KERN_INFO "Drive %i is %li Mb (%i / %i) \n", - i,raid[device].disk[i].sectors/2048,major(raid[device].disk[i].device),minor(raid[device].disk[i].device)); + i, raid[device].disk[i].sectors / 2048, + major(raid[device].disk[i].device), + minor(raid[device].disk[i].device)); count++; } } if (count) { - printk(KERN_INFO "Raid%i array consists of %i drives. \n",raidlevel,count); + printk(KERN_INFO "Raid%i array consists of %i drives. \n", + raidlevel, count); return 0; } else { return -ENODEV; @@ -556,10 +617,10 @@ do { cookie = 0; - device=ataraid_get_device(&pdcraid0_ops); - if (device<0) + device = ataraid_get_device(&pdcraid0_ops); + if (device < 0) break; - retval = pdcraid_init_one(device,0); + retval = pdcraid_init_one(device, 0); if (retval) { ataraid_release_device(device); break; @@ -569,12 +630,12 @@ } while (1); do { - + cookie = 0; - device=ataraid_get_device(&pdcraid1_ops); - if (device<0) + device = ataraid_get_device(&pdcraid1_ops); + if (device < 0) break; - retval = pdcraid_init_one(device,1); + retval = pdcraid_init_one(device, 1); if (retval) { ataraid_release_device(device); break; @@ -584,35 +645,38 @@ } while (1); if (count) { - printk(KERN_INFO "Promise Fasttrak(tm) Softwareraid driver for linux version 0.03beta\n"); + printk(KERN_INFO + "Promise Fasttrak(tm) Softwareraid driver for linux version 0.03beta\n"); return 0; } - printk(KERN_DEBUG "Promise Fasttrak(tm) Softwareraid driver 0.03beta: No raid array found\n"); + printk(KERN_DEBUG + "Promise Fasttrak(tm) Softwareraid driver 0.03beta: No raid array found\n"); return -ENODEV; } -static void __exit pdcraid_exit (void) +static void __exit pdcraid_exit(void) { - int i,device; - for (device = 0; device<16; device++) { - for (i=0;i<8;i++) { - struct block_device *bdev = raid[device].disk[i].bdev; + int i, device; + for (device = 0; device < 16; 
device++) { + for (i = 0; i < 8; i++) { + struct block_device *bdev = + raid[device].disk[i].bdev; raid[device].disk[i].bdev = NULL; if (bdev) blkdev_put(bdev, BDEV_RAW); - } + } if (raid[device].sectors) ataraid_release_device(device); } } -static int pdcraid_open(struct inode * inode, struct file * filp) +static int pdcraid_open(struct inode *inode, struct file *filp) { MOD_INC_USE_COUNT; return 0; } -static int pdcraid_release(struct inode * inode, struct file * filp) -{ +static int pdcraid_release(struct inode *inode, struct file *filp) +{ MOD_DEC_USE_COUNT; return 0; } diff -Nru a/drivers/ide/piix.c b/drivers/ide/piix.c --- a/drivers/ide/piix.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/piix.c Sat Jul 20 12:12:35 2002 @@ -388,130 +388,130 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82371FB_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82371SB_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82371AB, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82443MX_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82372FB_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: 
piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801AA_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801AB_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801BA_9, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801BA_8, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801E_9, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801CA_10, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801CA_11, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_INTEL, - device: PCI_DEVICE_ID_INTEL_82801DB_9, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - 
init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD - }, - { - vendor: PCI_VENDOR_ID_EFAR, - device: PCI_DEVICE_ID_EFAR_SLC90E66_1, - init_chipset: piix_init_chipset, - init_channel: piix_init_channel, - init_dma: piix_init_dma, - enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82371FB_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82371SB_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82371AB, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82443MX_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82372FB_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801AA_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = 
PCI_DEVICE_ID_INTEL_82801AB_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801BA_9, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801BA_8, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801E_9, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801CA_10, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801CA_11, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_82801DB_9, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD + }, + { + .vendor = PCI_VENDOR_ID_EFAR, + .device = PCI_DEVICE_ID_EFAR_SLC90E66_1, + .init_chipset = piix_init_chipset, + .init_channel = piix_init_channel, + .init_dma = piix_init_dma, + 
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, + .bootable = ON_BOARD }, }; @@ -519,9 +519,8 @@ { int i; - for (i = 0; i < ARRAY_SIZE(chipsets); ++i) { + for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - } return 0; } diff -Nru a/drivers/ide/probe.c b/drivers/ide/probe.c --- a/drivers/ide/probe.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/probe.c Sat Jul 20 12:12:34 2002 @@ -97,11 +97,15 @@ } } - /* There used to be code here that assigned drive->id->CHS - to drive->CHS and that to drive->bios_CHS. However, some disks have - id->C/H/S = 4092/16/63 but are larger than 2.1 GB. In such cases - that code was wrong. Moreover, there seems to be no reason to do - any of these things. */ + /* There used to be code here that assigned drive->id->CHS to + * drive->CHS and that to drive->bios_CHS. However, some disks have + * id->C/H/S = 4092/16/63 but are larger than 2.1 GB. In such cases + * that code was wrong. Moreover, there seems to be no reason to do + * any of these things. + * + * Please note that recent RedHat changes to the disk utils are bogous + * and will report spurious errors. + */ /* translate? */ if (drive->forced_geom) @@ -169,8 +173,8 @@ } /* - * hd_driveid data come as little endian, it needs to be converted on big - * endian machines. + * Drive ID data come as little endian, it needs to be converted on big endian + * machines. */ void ata_fix_driveid(struct hd_driveid *id) { @@ -319,11 +323,8 @@ outb(inb(ch->dma_base + 2) & ~(1 << (5 + unit)), ch->dma_base + 2); #endif - /* Don't use ide_wait_cmd here - it will attempt to set_geometry and - * recalibrate, but for some reason these don't work at this point - * (lost interrupt). - * - * Select the drive, and issue the SETFEATURES command + /* + * Select the drive, and issue the SETFEATURES command. */ disable_irq(ch->irq); /* disable_irq_nosync ?? 
*/ udelay(1); @@ -339,7 +340,6 @@ udelay(1); ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_CMD, NULL); ata_mask(drive); - enable_irq(ch->irq); if (ret != ATA_OP_READY) { @@ -403,7 +403,7 @@ */ ata_read(drive, id, SECTOR_WORDS); - ide__sti(); /* local CPU only */ + local_irq_enable(); ata_fix_driveid(id); if (id->word156 == 0x4d42) { @@ -616,12 +616,12 @@ if (ata_status(drive, DRQ_STAT, BAD_R_STAT)) { unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only; some systems need this */ + + local_irq_save(flags); /* some systems need this */ do_identify(drive, cmd); /* drive returned ID */ rc = 0; /* drive responded with ID */ ata_status(drive, 0, 0); /* clear drive IRQ */ - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); /* local CPU only */ } else rc = 2; /* drive refused ID */ @@ -733,8 +733,8 @@ ch->straight8 = 0; - __save_flags(flags); /* local CPU only */ - __sti(); /* local CPU only; needed for jiffies and irq probing */ + __save_flags(flags); + local_irq_enable(); /* needed for jiffies and irq probing */ /* * Check for the presence of a channel by probing for drives on it. @@ -852,7 +852,7 @@ if (ch->reset) ata_reset(ch); - __restore_flags(flags); /* local CPU only */ + __restore_flags(flags); /* * Now setup the PIO transfer modes of the drives on this channel. 
@@ -1127,6 +1127,7 @@ gd->sizes = kmalloc(ATA_MINORS * sizeof(int), GFP_KERNEL); if (!gd->sizes) goto err_kmalloc_gd_sizes; + memset(gd->sizes, 0, ATA_MINORS*sizeof(gd->sizes[0])); gd->part = kmalloc(ATA_MINORS * sizeof(struct hd_struct), GFP_KERNEL); if (!gd->part) diff -Nru a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c --- a/drivers/ide/q40ide.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/q40ide.c Sat Jul 20 12:12:35 2002 @@ -57,7 +57,7 @@ static int q40ide_default_irq(q40ide_ioreg_t base) { - switch (base) { + switch (base) { case 0x1f0: return 14; case 0x170: return 15; case 0x1e8: return 11; @@ -66,12 +66,9 @@ } } - - - /* - * Probe for Q40 IDE interfaces - */ - +/* + * Probe for Q40 IDE interfaces + */ void q40ide_init(void) { int i; diff -Nru a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c --- a/drivers/ide/rz1000.c Sat Jul 20 12:12:34 2002 +++ b/drivers/ide/rz1000.c Sat Jul 20 12:12:34 2002 @@ -51,16 +51,16 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_PCTECH, - device: PCI_DEVICE_ID_PCTECH_RZ1000, - init_channel: rz1000_init_channel, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_PCTECH, + .device = PCI_DEVICE_ID_PCTECH_RZ1000, + .init_channel = rz1000_init_channel, + .bootable = ON_BOARD }, { - vendor: PCI_VENDOR_ID_PCTECH, - device: PCI_DEVICE_ID_PCTECH_RZ1001, - init_channel: rz1000_init_channel, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_PCTECH, + .device = PCI_DEVICE_ID_PCTECH_RZ1001, + .init_channel = rz1000_init_channel, + .bootable = ON_BOARD }, }; @@ -68,9 +68,8 @@ { int i; - for (i = 0; i < ARRAY_SIZE(chipsets); ++i) { + for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - } return 0; } diff -Nru a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c --- a/drivers/ide/serverworks.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/serverworks.c Sat Jul 20 12:12:35 2002 @@ -394,20 +394,20 @@ /* module data table */ static struct ata_pci_device chipsets[] 
__initdata = { { - vendor: PCI_VENDOR_ID_SERVERWORKS, - device: PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, - init_chipset: svwks_init_chipset, - init_channel: ide_init_svwks, - bootable: ON_BOARD, - flags: ATA_F_DMA + .vendor = PCI_VENDOR_ID_SERVERWORKS, + .device = PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, + .init_chipset = svwks_init_chipset, + .init_channel = ide_init_svwks, + .bootable = ON_BOARD, + .flags = ATA_F_DMA }, { - vendor: PCI_VENDOR_ID_SERVERWORKS, - device: PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, - init_chipset: svwks_init_chipset, - init_channel: ide_init_svwks, - bootable: ON_BOARD, - flags: ATA_F_SIMPLEX + .vendor = PCI_VENDOR_ID_SERVERWORKS, + .device = PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, + .init_chipset = svwks_init_chipset, + .init_channel = ide_init_svwks, + .bootable = ON_BOARD, + .flags = ATA_F_SIMPLEX }, }; diff -Nru a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c --- a/drivers/ide/sis5513.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/sis5513.c Sat Jul 20 12:12:35 2002 @@ -505,12 +505,12 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_SI, - device: PCI_DEVICE_ID_SI_5513, - init_chipset: pci_init_sis5513, - init_channel: ide_init_sis5513, - enablebits: {{0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, - bootable: ON_BOARD, + .vendor = PCI_VENDOR_ID_SI, + .device = PCI_DEVICE_ID_SI_5513, + .init_chipset = pci_init_sis5513, + .init_channel = ide_init_sis5513, + .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04} }, + .bootable = ON_BOARD, }; int __init init_sis5513(void) diff -Nru a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c --- a/drivers/ide/sl82c105.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/sl82c105.c Sat Jul 20 12:12:35 2002 @@ -360,13 +360,13 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_WINBOND, - device: PCI_DEVICE_ID_WINBOND_82C105, - init_chipset: sl82c105_init_chipset, - init_channel: sl82c105_init_channel, - init_dma: sl82c105_init_dma, - 
enablebits: { {0x40,0x01,0x01}, {0x40,0x10,0x10} }, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_WINBOND, + .device = PCI_DEVICE_ID_WINBOND_82C105, + .init_chipset = sl82c105_init_chipset, + .init_channel = sl82c105_init_channel, + .init_dma = sl82c105_init_dma, + .enablebits = { {0x40,0x01,0x01}, {0x40,0x10,0x10} }, + .bootable = ON_BOARD }; int __init init_sl82c105(void) diff -Nru a/drivers/ide/tcq.c b/drivers/ide/tcq.c --- a/drivers/ide/tcq.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/tcq.c Sat Jul 20 12:12:35 2002 @@ -61,7 +61,7 @@ struct ata_taskfile *args = rq->special; struct ata_channel *ch = drive->channel; - ide__sti(); + local_irq_enable(); spin_lock_irqsave(ch->lock, flags); @@ -134,7 +134,6 @@ ar->XXX_handler = tcq_nop_handler; ar->command_type = IDE_DRIVE_TASK_NO_DATA; - rq->rq_dev = mk_kdev(drive->channel->major, (drive->select.b.unit)<tag); - __ata_end_request(drive, rq, !dma_stat, rq->nr_sectors); + ata_end_request(drive, rq, !dma_stat, rq->nr_sectors); /* * we completed this command, check if we can service a new command diff -Nru a/drivers/ide/trm290.c b/drivers/ide/trm290.c --- a/drivers/ide/trm290.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/trm290.c Sat Jul 20 12:12:35 2002 @@ -151,8 +151,7 @@ /* select PIO or DMA */ reg = use_dma ? 
(0x21 | 0x82) : (0x21 & ~0x82); - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); if (reg != hwif->select_data) { hwif->select_data = reg; @@ -167,7 +166,7 @@ outw(reg, hwif->config_data+3); } - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); } static void trm290_selectproc(struct ata_device *drive) @@ -266,8 +265,7 @@ printk("TRM290: using default config base at 0x%04lx\n", hwif->config_data); } - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ + local_irq_save(flags); /* put config reg into first byte of hwif->select_data */ outb(0x51|(hwif->unit<<3), hwif->config_data+1); hwif->select_data = 0x21; /* select PIO as default */ @@ -275,7 +273,7 @@ reg = inb(hwif->config_data+3); /* get IRQ info */ reg = (reg & 0x10) | 0x03; /* mask IRQs for both ports */ outb(reg, hwif->config_data+3); - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); if ((reg & 0x10)) hwif->irq = hwif->unit ? 
15 : 14; /* legacy mode */ @@ -327,10 +325,10 @@ /* module data table */ static struct ata_pci_device chipset __initdata = { - vendor: PCI_VENDOR_ID_TEKRAM, - device: PCI_DEVICE_ID_TEKRAM_DC290, - init_channel: trm290_init_channel, - bootable: ON_BOARD + .vendor = PCI_VENDOR_ID_TEKRAM, + .device = PCI_DEVICE_ID_TEKRAM_DC290, + .init_channel = trm290_init_channel, + .bootable = ON_BOARD }; int __init init_trm290(void) diff -Nru a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c --- a/drivers/ide/umc8672.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/umc8672.c Sat Jul 20 12:12:35 2002 @@ -127,30 +127,29 @@ { unsigned long flags; - __save_flags(flags); /* local CPU only */ - __cli(); /* local CPU only */ - if (check_region(0x108, 2)) { - __restore_flags(flags); + local_irq_save(flags); + if (!request_region(0x108, 2, "umc8672")) { + local_irq_restore(flags); printk("\numc8672: PORTS 0x108-0x109 ALREADY IN USE\n"); return; } outb_p (0x5A,0x108); /* enable umc */ if (in_umc (0xd5) != 0xa0) { - __restore_flags(flags); /* local CPU only */ + local_irq_restore(flags); + release_region(0x108, 2); printk ("umc8672: not found\n"); return; } outb_p (0xa5,0x108); /* disable umc */ - umc_set_speeds (current_speeds); - __restore_flags(flags); /* local CPU only */ + umc_set_speeds(current_speeds); + local_irq_restore(flags); - request_region(0x108, 2, "umc8672"); ide_hwifs[0].chipset = ide_umc8672; ide_hwifs[1].chipset = ide_umc8672; - ide_hwifs[0].tuneproc = &tune_umc; - ide_hwifs[1].tuneproc = &tune_umc; + ide_hwifs[0].tuneproc = tune_umc; + ide_hwifs[1].tuneproc = tune_umc; ide_hwifs[0].unit = ATA_PRIMARY; ide_hwifs[1].unit = ATA_SECONDARY; } diff -Nru a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c --- a/drivers/ide/via82cxxx.c Sat Jul 20 12:12:35 2002 +++ b/drivers/ide/via82cxxx.c Sat Jul 20 12:12:35 2002 @@ -380,22 +380,22 @@ /* module data table */ static struct ata_pci_device chipsets[] __initdata = { { - vendor: PCI_VENDOR_ID_VIA, - device: PCI_DEVICE_ID_VIA_82C576_1, - 
init_chipset: via82cxxx_init_chipset, - init_channel: via82cxxx_init_channel, - init_dma: via82cxxx_init_dma, - enablebits: {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, - bootable: ON_BOARD, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_82C576_1, + .init_chipset = via82cxxx_init_chipset, + .init_channel = via82cxxx_init_channel, + .init_dma = via82cxxx_init_dma, + .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, + .bootable = ON_BOARD, }, { - vendor: PCI_VENDOR_ID_VIA, - device: PCI_DEVICE_ID_VIA_82C586_1, - init_chipset: via82cxxx_init_chipset, - init_channel: via82cxxx_init_channel, - init_dma: via82cxxx_init_dma, - enablebits: {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, - bootable: ON_BOARD, + .vendor = PCI_VENDOR_ID_VIA, + .device = PCI_DEVICE_ID_VIA_82C586_1, + .init_chipset = via82cxxx_init_chipset, + .init_channel = via82cxxx_init_channel, + .init_dma = via82cxxx_init_dma, + .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, + .bootable = ON_BOARD, }, }; @@ -403,9 +403,8 @@ { int i; - for (i = 0; i < ARRAY_SIZE(chipsets); ++i) { + for (i = 0; i < ARRAY_SIZE(chipsets); ++i) ata_register_chipset(&chipsets[i]); - } return 0; } diff -Nru a/drivers/md/linear.c b/drivers/md/linear.c --- a/drivers/md/linear.c Sat Jul 20 12:12:35 2002 +++ b/drivers/md/linear.c Sat Jul 20 12:12:35 2002 @@ -52,23 +52,22 @@ conf->smallest = NULL; cnt = 0; ITERATE_RDEV(mddev,rdev,tmp) { - int j = rdev->sb->this_disk.raid_disk; + int j = rdev->raid_disk; dev_info_t *disk = conf->disks + j; - if (j < 0 || j > mddev->sb->raid_disks || disk->bdev) { + if (j < 0 || j > mddev->raid_disks || disk->bdev) { printk("linear: disk numbering problem. Aborting!\n"); goto out; } disk->bdev = rdev->bdev; - atomic_inc(&rdev->bdev->bd_count); disk->size = rdev->size; if (!conf->smallest || (disk->size < conf->smallest->size)) conf->smallest = disk; cnt++; } - if (cnt != mddev->sb->raid_disks) { + if (cnt != mddev->raid_disks) { printk("linear: not enough drives present. 
Aborting!\n"); goto out; } @@ -112,12 +111,8 @@ return 0; out: - if (conf) { - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->disks[i].bdev) - bdput(conf->disks[i].bdev); + if (conf) kfree(conf); - } MOD_DEC_USE_COUNT; return 1; } @@ -125,11 +120,7 @@ static int linear_stop (mddev_t *mddev) { linear_conf_t *conf = mddev_to_conf(mddev); - int i; - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->disks[i].bdev) - bdput(conf->disks[i].bdev); kfree(conf->hash_table); kfree(conf); @@ -195,7 +186,7 @@ } sz += sprintf(page+sz, "\n"); #endif - sz += sprintf(page+sz, " %dk rounding", mddev->sb->chunk_size/1024); + sz += sprintf(page+sz, " %dk rounding", mddev->chunk_size/1024); return sz; } diff -Nru a/drivers/md/md.c b/drivers/md/md.c --- a/drivers/md/md.c Sat Jul 20 12:12:35 2002 +++ b/drivers/md/md.c Sat Jul 20 12:12:35 2002 @@ -36,6 +36,7 @@ #include #include #include +#include /* for invalidate_bdev */ #include @@ -175,7 +176,7 @@ { if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) return; - if (!mddev->sb && list_empty(&mddev->disks)) { + if (!mddev->raid_disks && list_empty(&mddev->disks)) { list_del(&mddev->all_mddevs); mddev_map[mdidx(mddev)] = NULL; kfree(mddev); @@ -242,19 +243,31 @@ struct list_head *tmp; ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->desc_nr == nr) + if (rdev->raid_disk == nr) return rdev; } return NULL; } -mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev) +static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) { struct list_head *tmp; mdk_rdev_t *rdev; ITERATE_RDEV(mddev,rdev,tmp) { - if (kdev_same(rdev->dev, dev)) + if (rdev->bdev->bd_dev == dev) + return rdev; + } + return NULL; +} + +static mdk_rdev_t * find_rdev_bdev(mddev_t * mddev, struct block_device *bdev) +{ + struct list_head *tmp; + mdk_rdev_t *rdev; + + ITERATE_RDEV(mddev,rdev,tmp) { + if (rdev->bdev == bdev) return rdev; } return NULL; @@ -297,26 +310,22 @@ return dname->name; } -static unsigned int calc_dev_sboffset(mdk_rdev_t *rdev, mddev_t *mddev, - int persistent) 
+static unsigned int calc_dev_sboffset(struct block_device *bdev) { - unsigned int size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; - if (persistent) - size = MD_NEW_SIZE_BLOCKS(size); - return size; + unsigned int size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; + return MD_NEW_SIZE_BLOCKS(size); } -static unsigned int calc_dev_size(mdk_rdev_t *rdev, mddev_t *mddev, int persistent) +static unsigned int calc_dev_size(struct block_device *bdev, mddev_t *mddev) { unsigned int size; - size = calc_dev_sboffset(rdev, mddev, persistent); - if (!mddev->sb) { - MD_BUG(); - return size; - } - if (mddev->sb->chunk_size) - size &= ~(mddev->sb->chunk_size/1024 - 1); + if (mddev->persistent) + size = calc_dev_sboffset(bdev); + else + size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; + if (mddev->chunk_size) + size &= ~(mddev->chunk_size/1024 - 1); return size; } @@ -326,14 +335,10 @@ mdk_rdev_t * rdev; struct list_head *tmp; - if (!mddev->sb) { - MD_BUG(); - return -EINVAL; - } /* * do size and offset calculations. 
*/ - mask = ~(mddev->sb->chunk_size/1024 - 1); + mask = ~(mddev->chunk_size/1024 - 1); ITERATE_RDEV(mddev,rdev,tmp) { rdev->size &= mask; @@ -342,23 +347,6 @@ return 0; } -static void remove_descriptor(mdp_disk_t *disk, mdp_super_t *sb) -{ - if (disk_active(disk)) { - sb->working_disks--; - } else { - if (disk_spare(disk)) { - sb->spare_disks--; - sb->working_disks--; - } else { - sb->failed_disks--; - } - } - sb->nr_disks--; - disk->major = 0; - disk->minor = 0; - mark_disk_removed(disk); -} #define BAD_MAGIC KERN_ERR \ "md: invalid raid superblock magic on %s\n" @@ -375,20 +363,6 @@ #define BAD_CSUM KERN_WARNING \ "md: invalid superblock checksum on %s\n" -static int alloc_array_sb(mddev_t * mddev) -{ - if (mddev->sb) { - MD_BUG(); - return 0; - } - - mddev->sb = (mdp_super_t *) __get_free_page (GFP_KERNEL); - if (!mddev->sb) - return -ENOMEM; - clear_page(mddev->sb); - return 0; -} - static int alloc_disk_sb(mdk_rdev_t * rdev) { if (rdev->sb_page) @@ -467,7 +441,7 @@ * * It also happens to be a multiple of 4Kb. 
*/ - sb_offset = calc_dev_sboffset(rdev, rdev->mddev, 1); + sb_offset = calc_dev_sboffset(rdev->bdev); rdev->sb_offset = sb_offset; if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ)) @@ -477,7 +451,7 @@ return 0; fail: - printk(NO_SB,partition_name(rdev->dev)); + printk(NO_SB,bdev_partition_name(rdev->bdev)); return -EINVAL; } @@ -508,17 +482,17 @@ } if (sb->md_magic != MD_SB_MAGIC) { - printk(BAD_MAGIC, partition_name(rdev->dev)); + printk(BAD_MAGIC, bdev_partition_name(rdev->bdev)); goto abort; } if (sb->md_minor >= MAX_MD_DEVS) { - printk(BAD_MINOR, partition_name(rdev->dev), sb->md_minor); + printk(BAD_MINOR, bdev_partition_name(rdev->bdev), sb->md_minor); goto abort; } if (calc_sb_csum(sb) != sb->sb_csum) { - printk(BAD_CSUM, partition_name(rdev->dev)); + printk(BAD_CSUM, bdev_partition_name(rdev->bdev)); goto abort; } ret = 0; @@ -550,7 +524,6 @@ return 0; } -static LIST_HEAD(all_raid_disks); static LIST_HEAD(pending_raid_disks); static void bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) @@ -566,12 +539,12 @@ printk( KERN_WARNING "md%d: WARNING: %s appears to be on the same physical disk as %s. 
True\n" " protection against single-disk failure might be compromised.\n", - mdidx(mddev), partition_name(rdev->dev), - partition_name(same_pdev->dev)); + mdidx(mddev), bdev_partition_name(rdev->bdev), + bdev_partition_name(same_pdev->bdev)); list_add(&rdev->same_set, &mddev->disks); rdev->mddev = mddev; - printk(KERN_INFO "md: bind<%s>\n", partition_name(rdev->dev)); + printk(KERN_INFO "md: bind<%s>\n", bdev_partition_name(rdev->bdev)); } static void unbind_rdev_from_array(mdk_rdev_t * rdev) @@ -581,7 +554,7 @@ return; } list_del_init(&rdev->same_set); - printk(KERN_INFO "md: unbind<%s>\n", partition_name(rdev->dev)); + printk(KERN_INFO "md: unbind<%s>\n", bdev_partition_name(rdev->bdev)); rdev->mddev = NULL; } @@ -592,18 +565,18 @@ * inode is not enough, the SCSI module usage code needs * an explicit open() on the device] */ -static int lock_rdev(mdk_rdev_t *rdev) +static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) { int err = 0; struct block_device *bdev; - bdev = bdget(kdev_t_to_nr(rdev->dev)); + bdev = bdget(dev); if (!bdev) return -ENOMEM; err = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_RAW); if (err) return err; - err = bd_claim(bdev, lock_rdev); + err = bd_claim(bdev, rdev); if (err) { blkdev_put(bdev, BDEV_RAW); return err; @@ -622,25 +595,19 @@ blkdev_put(bdev, BDEV_RAW); } -void md_autodetect_dev(kdev_t dev); +void md_autodetect_dev(dev_t dev); static void export_rdev(mdk_rdev_t * rdev) { - printk(KERN_INFO "md: export_rdev(%s)\n",partition_name(rdev->dev)); + printk(KERN_INFO "md: export_rdev(%s)\n",bdev_partition_name(rdev->bdev)); if (rdev->mddev) MD_BUG(); - unlock_rdev(rdev); free_disk_sb(rdev); - list_del_init(&rdev->all); - if (!list_empty(&rdev->pending)) { - printk(KERN_INFO "md: (%s was pending)\n", - partition_name(rdev->dev)); - list_del_init(&rdev->pending); - } + list_del_init(&rdev->same_set); + unlock_rdev(rdev); #ifndef MODULE - md_autodetect_dev(rdev->dev); + md_autodetect_dev(rdev->bdev->bd_dev); #endif - rdev->dev = NODEV; 
rdev->faulty = 0; kfree(rdev); } @@ -655,12 +622,6 @@ { struct list_head *tmp; mdk_rdev_t *rdev; - mdp_super_t *sb = mddev->sb; - - if (mddev->sb) { - mddev->sb = NULL; - free_page((unsigned long) sb); - } ITERATE_RDEV(mddev,rdev,tmp) { if (!rdev->mddev) { @@ -671,6 +632,7 @@ } if (!list_empty(&mddev->disks)) MD_BUG(); + mddev->raid_disks = 0; } static void free_mddev(mddev_t *mddev) @@ -731,8 +693,8 @@ static void print_rdev(mdk_rdev_t *rdev) { - printk(KERN_INFO "md: rdev %s: O:%s, SZ:%08ld F:%d DN:%d ", - partition_name(rdev->dev), partition_name(rdev->old_dev), + printk(KERN_INFO "md: rdev %s, SZ:%08ld F:%d DN:%d ", + bdev_partition_name(rdev->bdev), rdev->size, rdev->faulty, rdev->desc_nr); if (rdev->sb) { printk(KERN_INFO "md: rdev superblock:\n"); @@ -755,13 +717,7 @@ printk("md%d: ", mdidx(mddev)); ITERATE_RDEV(mddev,rdev,tmp2) - printk("<%s>", partition_name(rdev->dev)); - - if (mddev->sb) { - printk(" array superblock:\n"); - print_sb(mddev->sb); - } else - printk(" no array superblock.\n"); + printk("<%s>", bdev_partition_name(rdev->bdev)); ITERATE_RDEV(mddev,rdev,tmp2) print_rdev(rdev); @@ -819,22 +775,8 @@ return 0; } -static mdk_rdev_t * find_rdev_all(kdev_t dev) -{ - struct list_head *tmp; - mdk_rdev_t *rdev; - - list_for_each(tmp, &all_raid_disks) { - rdev = list_entry(tmp, mdk_rdev_t, all); - if (kdev_same(rdev->dev, dev)) - return rdev; - } - return NULL; -} - static int write_disk_sb(mdk_rdev_t * rdev) { - kdev_t dev = rdev->dev; unsigned long sb_offset, size; if (!rdev->sb) { @@ -850,10 +792,10 @@ return 1; } - sb_offset = calc_dev_sboffset(rdev, rdev->mddev, 1); + sb_offset = calc_dev_sboffset(rdev->bdev); if (rdev->sb_offset != sb_offset) { printk(KERN_INFO "%s's sb offset has changed from %ld to %ld, skipping\n", - partition_name(dev), rdev->sb_offset, sb_offset); + bdev_partition_name(rdev->bdev), rdev->sb_offset, sb_offset); goto skip; } /* @@ -861,69 +803,133 @@ * its size has changed to zero silently, and the MD code does * not yet know 
that it's faulty. */ - size = calc_dev_size(rdev, rdev->mddev, 1); + size = calc_dev_size(rdev->bdev, rdev->mddev); if (size != rdev->size) { printk(KERN_INFO "%s's size has changed from %ld to %ld since import, skipping\n", - partition_name(dev), rdev->size, size); + bdev_partition_name(rdev->bdev), rdev->size, size); goto skip; } - printk(KERN_INFO "(write) %s's sb offset: %ld\n", partition_name(dev), sb_offset); + printk(KERN_INFO "(write) %s's sb offset: %ld\n", bdev_partition_name(rdev->bdev), sb_offset); if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE)) goto fail; skip: return 0; fail: - printk("md: write_disk_sb failed for device %s\n", partition_name(dev)); + printk("md: write_disk_sb failed for device %s\n", bdev_partition_name(rdev->bdev)); return 1; } -static void set_this_disk(mddev_t *mddev, mdk_rdev_t *rdev) +static void sync_sbs(mddev_t * mddev) { - int i, ok = 0; - mdp_disk_t *desc; + mdk_rdev_t *rdev; + mdp_super_t *sb; + struct list_head *tmp; - for (i = 0; i < MD_SB_DISKS; i++) { - desc = mddev->sb->disks + i; -#if 0 - if (disk_faulty(desc)) { - if (mk_kdev(desc->major,desc->minor) == rdev->dev) - ok = 1; - continue; - } -#endif - if (kdev_same(mk_kdev(desc->major,desc->minor), rdev->dev)) { - rdev->sb->this_disk = *desc; - rdev->desc_nr = desc->number; - ok = 1; - break; - } - } + /* make all rdev->sb match mddev data.. + * we setup the data in the first rdev and copy it + * to the others. + * + * 1/ zero out disks + * 2/ Add info for each disk, keeping track of highest desc_nr + * 3/ any empty disks < highest become removed + * + * disks[0] gets initialised to REMOVED because + * we cannot be sure from other fields if it has + * been initialised or not. 
+ */ + int highest = 0; + int i; + int active=0, working=0,failed=0,spare=0,nr_disks=0; - if (!ok) { + if (list_empty(&mddev->disks)) { MD_BUG(); + return; } -} + rdev = list_entry(&mddev->disks.next, mdk_rdev_t, same_set); + sb = rdev->sb; -static int sync_sbs(mddev_t * mddev) -{ - mdk_rdev_t *rdev; - mdp_super_t *sb; - struct list_head *tmp; + memset(sb, 0, sizeof(*sb)); + + sb->md_magic = MD_SB_MAGIC; + sb->major_version = mddev->major_version; + sb->minor_version = mddev->minor_version; + sb->patch_version = mddev->patch_version; + sb->gvalid_words = 0; /* ignored */ + memcpy(&sb->set_uuid0, mddev->uuid+0, 4); + memcpy(&sb->set_uuid1, mddev->uuid+4, 4); + memcpy(&sb->set_uuid2, mddev->uuid+8, 4); + memcpy(&sb->set_uuid3, mddev->uuid+12,4); + + sb->ctime = mddev->ctime; + sb->level = mddev->level; + sb->size = mddev->size; + sb->raid_disks = mddev->raid_disks; + sb->md_minor = mddev->__minor; + sb->not_persistent = !mddev->persistent; + sb->utime = mddev->utime; + sb->state = mddev->state; + sb->events_hi = (mddev->events>>32); + sb->events_lo = (u32)mddev->events; + + sb->layout = mddev->layout; + sb->chunk_size = mddev->chunk_size; + + sb->disks[0].state = (1<disks[rdev->desc_nr]; + nr_disks++; + d->number = rdev->desc_nr; + d->major = MAJOR(rdev->bdev->bd_dev); + d->minor = MINOR(rdev->bdev->bd_dev); + d->raid_disk = rdev->raid_disk; + if (rdev->faulty) { + d->state = (1<in_sync) { + d->state = (1<state |= (1<state = 0; + spare++; + working++; + } + if (rdev->desc_nr > highest) + highest = rdev->desc_nr; + } + + /* now set the "removed" bit on any non-trailing holes */ + for (i=0; idisks[i]; + if (d->state == 0 && d->number == 0) { + d->number = i; + d->raid_disk = i; + d->state = (1<nr_disks = nr_disks; + sb->active_disks = active; + sb->working_disks = working; + sb->failed_disks = failed; + sb->spare_disks = spare; ITERATE_RDEV(mddev,rdev,tmp) { + mdp_super_t *this_sb; + if (rdev->faulty || rdev->alias_device) continue; - sb = rdev->sb; - *sb = *mddev->sb; 
- set_this_disk(mddev, rdev); - sb->sb_csum = calc_sb_csum(sb); + this_sb = rdev->sb; + if (this_sb != sb) + *this_sb = *sb; + this_sb->this_disk = this_sb->disks[rdev->desc_nr]; + this_sb->sb_csum = calc_sb_csum(this_sb); } - return 0; } -void __md_update_sb(mddev_t * mddev) +static void md_update_sb(mddev_t * mddev) { int err, count = 100; struct list_head *tmp; @@ -931,18 +937,17 @@ mddev->sb_dirty = 0; repeat: - mddev->sb->utime = CURRENT_TIME; - if (!(++mddev->sb->events_lo)) - ++mddev->sb->events_hi; + mddev->utime = CURRENT_TIME; + mddev->events ++; - if (!(mddev->sb->events_lo | mddev->sb->events_hi)) { + if (!mddev->events) { /* * oops, this 64-bit counter should never wrap. * Either we are in around ~1 trillion A.C., assuming * 1 reboot per second, or we have a bug: */ MD_BUG(); - mddev->sb->events_lo = mddev->sb->events_hi = 0xffffffff; + mddev->events --; } sync_sbs(mddev); @@ -950,7 +955,7 @@ * do not write anything to disk if using * nonpersistent superblocks */ - if (mddev->sb->not_persistent) + if (!mddev->persistent) return; printk(KERN_INFO "md: updating md%d RAID superblock on device\n", @@ -964,7 +969,7 @@ if (rdev->alias_device) printk("(skipping alias "); - printk("%s ", partition_name(rdev->dev)); + printk("%s ", bdev_partition_name(rdev->bdev)); if (!rdev->faulty && !rdev->alias_device) { printk("[events: %08lx]", (unsigned long)rdev->sb->events_lo); @@ -981,16 +986,6 @@ } } -void md_update_sb(mddev_t *mddev) -{ - if (mddev_lock(mddev)) - return; - if (mddev->sb_dirty) - __md_update_sb(mddev); - mddev_unlock(mddev); -} - - /* * Import a device. If 'on_disk', then sanity check the superblock * @@ -1001,18 +996,15 @@ * * a faulty rdev _never_ has rdev->sb set. 
*/ -static mdk_rdev_t *md_import_device(kdev_t newdev, int on_disk) +static mdk_rdev_t *md_import_device(dev_t newdev, int on_disk) { int err; mdk_rdev_t *rdev; unsigned int size; - if (find_rdev_all(newdev)) - return ERR_PTR(-EEXIST); - rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { - printk(KERN_ERR "md: could not alloc mem for %s!\n", partition_name(newdev)); + printk(KERN_ERR "md: could not alloc mem for %s!\n", partition_name(to_kdev_t(newdev))); return ERR_PTR(-ENOMEM); } memset(rdev, 0, sizeof(*rdev)); @@ -1020,11 +1012,10 @@ if ((err = alloc_disk_sb(rdev))) goto abort_free; - rdev->dev = newdev; - if (lock_rdev(rdev)) { - printk(KERN_ERR "md: could not lock %s, zero-size? Marking faulty.\n", - partition_name(newdev)); - err = -EINVAL; + err = lock_rdev(rdev, newdev); + if (err) { + printk(KERN_ERR "md: could not lock %s.\n", + partition_name(to_kdev_t(newdev))); goto abort_free; } rdev->desc_nr = -1; @@ -1034,7 +1025,7 @@ if (!size) { printk(KERN_WARNING "md: %s has zero or unknown size, marking faulty!\n", - partition_name(newdev)); + bdev_partition_name(rdev->bdev)); err = -EINVAL; goto abort_free; } @@ -1042,26 +1033,15 @@ if (on_disk) { if ((err = read_disk_sb(rdev))) { printk(KERN_WARNING "md: could not read %s's sb, not importing!\n", - partition_name(newdev)); + bdev_partition_name(rdev->bdev)); goto abort_free; } if ((err = check_disk_sb(rdev))) { printk(KERN_WARNING "md: %s has invalid sb, not importing!\n", - partition_name(newdev)); + bdev_partition_name(rdev->bdev)); goto abort_free; } - - if (rdev->sb->level != -4) { - rdev->old_dev = mk_kdev(rdev->sb->this_disk.major, - rdev->sb->this_disk.minor); - rdev->desc_nr = rdev->sb->this_disk.number; - } else { - rdev->old_dev = NODEV; - rdev->desc_nr = -1; - } } - list_add(&rdev->all, &all_raid_disks); - INIT_LIST_HEAD(&rdev->pending); INIT_LIST_HEAD(&rdev->same_set); if (rdev->faulty && rdev->sb) @@ -1099,9 +1079,9 @@ static int analyze_sbs(mddev_t * mddev) { - int 
out_of_date = 0, i, first; - struct list_head *tmp, *tmp2; - mdk_rdev_t *rdev, *rdev2, *freshest; + int out_of_date = 0, i; + struct list_head *tmp; + mdk_rdev_t *rdev, *freshest; mdp_super_t *sb; /* @@ -1132,7 +1112,7 @@ continue; } if (!sb_equal(sb, rdev->sb)) { - printk(INCONSISTENT, partition_name(rdev->dev)); + printk(INCONSISTENT, bdev_partition_name(rdev->bdev)); kick_rdev_from_array(rdev); continue; } @@ -1143,10 +1123,6 @@ * find the freshest superblock, that one will be the superblock * that represents the whole array. */ - if (!mddev->sb) - if (alloc_array_sb(mddev)) - goto abort; - sb = mddev->sb; freshest = NULL; ITERATE_RDEV(mddev,rdev,tmp) { @@ -1163,7 +1139,7 @@ } printk(KERN_INFO "md: %s's event counter: %08lx\n", - partition_name(rdev->dev), + bdev_partition_name(rdev->bdev), (unsigned long)rdev->sb->events_lo); if (!freshest) { freshest = rdev; @@ -1182,9 +1158,29 @@ } if (out_of_date) { printk(OUT_OF_DATE); - printk(KERN_INFO "md: freshest: %s\n", partition_name(freshest->dev)); + printk(KERN_INFO "md: freshest: %s\n", bdev_partition_name(freshest->bdev)); } - memcpy (sb, freshest->sb, sizeof(*sb)); + + sb = freshest->sb; + + mddev->major_version = sb->major_version; + mddev->minor_version = sb->minor_version; + mddev->patch_version = sb->patch_version; + mddev->persistent = ! 
sb->not_persistent; + mddev->chunk_size = sb->chunk_size; + mddev->ctime = sb->ctime; + mddev->utime = sb->utime; + mddev->level = sb->level; + mddev->layout = sb->layout; + mddev->raid_disks = sb->raid_disks; + mddev->state = sb->state; + mddev->size = sb->size; + mddev->events = md_event(sb); + + memcpy(mddev->uuid+0, &sb->set_uuid0, 4); + memcpy(mddev->uuid+4, &sb->set_uuid1, 4); + memcpy(mddev->uuid+8, &sb->set_uuid2, 4); + memcpy(mddev->uuid+12,&sb->set_uuid3, 4); /* * at this point we have picked the 'best' superblock @@ -1196,240 +1192,58 @@ /* * Kick all non-fresh devices */ - __u64 ev1, ev2; + __u64 ev1; ev1 = md_event(rdev->sb); - ev2 = md_event(sb); ++ev1; - if (ev1 < ev2) { + if (ev1 < mddev->events) { printk(KERN_WARNING "md: kicking non-fresh %s from array!\n", - partition_name(rdev->dev)); + bdev_partition_name(rdev->bdev)); kick_rdev_from_array(rdev); continue; } } - /* - * Fix up changed device names ... but only if this disk has a - * recent update time. Use faulty checksum ones too. + /* set rdev->desc_nr for each device. 
+ * for MULTIPATH, we just us sequential number as + * nothing else is meaningful */ - if (mddev->sb->level != -4) + i = 0; ITERATE_RDEV(mddev,rdev,tmp) { - __u64 ev1, ev2, ev3; - if (rdev->faulty || rdev->alias_device) { - MD_BUG(); - goto abort; - } - ev1 = md_event(rdev->sb); - ev2 = md_event(sb); - ev3 = ev2; - --ev3; - if (!kdev_same(rdev->dev, rdev->old_dev) && - ((ev1 == ev2) || (ev1 == ev3))) { + if (mddev->level == LEVEL_MULTIPATH) { + rdev->alias_device = !!i; + rdev->desc_nr = i++; + rdev->raid_disk = rdev->desc_nr; + rdev->in_sync = 1; + } else { mdp_disk_t *desc; + rdev->desc_nr = rdev->sb->this_disk.number; + desc = sb->disks + rdev->desc_nr; + rdev->raid_disk = desc->raid_disk; + rdev->in_sync = rdev->faulty = 0; - printk(KERN_WARNING "md: device name has changed from %s to %s since last import!\n", - partition_name(rdev->old_dev), partition_name(rdev->dev)); - if (rdev->desc_nr == -1) { - MD_BUG(); - goto abort; - } - desc = &sb->disks[rdev->desc_nr]; - if (!kdev_same( rdev->old_dev, mk_kdev(desc->major, desc->minor))) { - MD_BUG(); - goto abort; - } - desc->major = major(rdev->dev); - desc->minor = minor(rdev->dev); - desc = &rdev->sb->this_disk; - desc->major = major(rdev->dev); - desc->minor = minor(rdev->dev); - } - } - - /* - * Remove unavailable and faulty devices ... - * - * note that if an array becomes completely unrunnable due to - * missing devices, we do not write the superblock back, so the - * administrator has a chance to fix things up. The removal thus - * only happens if it's nonfatal to the contents of the array. - */ - for (i = 0; i < MD_SB_DISKS; i++) { - int found; - mdp_disk_t *desc; - kdev_t dev; - - desc = sb->disks + i; - dev = mk_kdev(desc->major, desc->minor); - - /* - * We kick faulty devices/descriptors immediately. - * - * Note: multipath devices are a special case. Since we - * were able to read the superblock on the path, we don't - * care if it was previously marked as faulty, it's up now - * so enable it. 
- */ - if (disk_faulty(desc) && mddev->sb->level != -4) { - found = 0; - ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->desc_nr != desc->number) - continue; - printk(KERN_WARNING "md%d: kicking faulty %s!\n", - mdidx(mddev),partition_name(rdev->dev)); + if (desc->state & (1<faulty = 1; kick_rdev_from_array(rdev); - found = 1; - break; - } - if (!found) { - if (kdev_none(dev)) - continue; - printk(KERN_WARNING "md%d: removing former faulty %s!\n", - mdidx(mddev), partition_name(dev)); - } - remove_descriptor(desc, sb); - continue; - } else if (disk_faulty(desc)) { - /* - * multipath entry marked as faulty, unfaulty it - */ - rdev = find_rdev(mddev, dev); - if(rdev) - mark_disk_spare(desc); - else - remove_descriptor(desc, sb); - } - - if (kdev_none(dev)) - continue; - /* - * Is this device present in the rdev ring? - */ - found = 0; - ITERATE_RDEV(mddev,rdev,tmp) { - /* - * Multi-path IO special-case: since we have no - * this_disk descriptor at auto-detect time, - * we cannot check rdev->number. - * We can check the device though. - */ - if ((sb->level == -4) && - kdev_same(rdev->dev, - mk_kdev(desc->major,desc->minor))) { - found = 1; - break; - } - if (rdev->desc_nr == desc->number) { - found = 1; - break; - } + } else if (desc->state & (1<raid_disk < mddev->raid_disks) + rdev->in_sync = 1; } - if (found) - continue; - - printk(KERN_WARNING "md%d: former device %s is unavailable, removing from array!\n", - mdidx(mddev), partition_name(dev)); - remove_descriptor(desc, sb); } - /* - * Double check wether all devices mentioned in the - * superblock are in the rdev ring. 
- */ - first = 1; - for (i = 0; i < MD_SB_DISKS; i++) { - mdp_disk_t *desc; - kdev_t dev; - - desc = sb->disks + i; - dev = mk_kdev(desc->major, desc->minor); - - if (kdev_none(dev)) - continue; - - if (disk_faulty(desc)) { - MD_BUG(); - goto abort; - } - - rdev = find_rdev(mddev, dev); - if (!rdev) { - MD_BUG(); - goto abort; - } - /* - * In the case of Multipath-IO, we have no - * other information source to find out which - * disk is which, only the position of the device - * in the superblock: - */ - if (mddev->sb->level == -4) { - if ((rdev->desc_nr != -1) && (rdev->desc_nr != i)) { - MD_BUG(); - goto abort; - } - rdev->desc_nr = i; - if (!first) - rdev->alias_device = 1; - else - first = 0; - } - } - - /* - * Kick all rdevs that are not in the - * descriptor array: - */ - ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->desc_nr == -1) - kick_rdev_from_array(rdev); - } - - /* - * Do a final reality check. - */ - if (mddev->sb->level != -4) { - ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->desc_nr == -1) { - MD_BUG(); - goto abort; - } - /* - * is the desc_nr unique? - */ - ITERATE_RDEV(mddev,rdev2,tmp2) { - if ((rdev2 != rdev) && - (rdev2->desc_nr == rdev->desc_nr)) { - MD_BUG(); - goto abort; - } - } - /* - * is the device unique? 
- */ - ITERATE_RDEV(mddev,rdev2,tmp2) { - if (rdev2 != rdev && - kdev_same(rdev2->dev, rdev->dev)) { - MD_BUG(); - goto abort; - } - } - } - } /* * Check if we can support this RAID array */ - if (sb->major_version != MD_MAJOR_VERSION || - sb->minor_version > MD_MINOR_VERSION) { + if (mddev->major_version != MD_MAJOR_VERSION || + mddev->minor_version > MD_MINOR_VERSION) { - printk(OLD_VERSION, mdidx(mddev), sb->major_version, - sb->minor_version, sb->patch_version); + printk(OLD_VERSION, mdidx(mddev), mddev->major_version, + mddev->minor_version, mddev->patch_version); goto abort; } - if ((sb->state != (1 << MD_SB_CLEAN)) && ((sb->level == 1) || - (sb->level == 4) || (sb->level == 5))) + if ((mddev->state != (1 << MD_SB_CLEAN)) && ((mddev->level == 1) || + (mddev->level == 4) || (mddev->level == 5))) printk(NOT_CLEAN_IGNORE, mdidx(mddev)); return 0; @@ -1444,9 +1258,8 @@ static int device_size_calculation(mddev_t * mddev) { - int data_disks = 0, persistent; + int data_disks = 0; unsigned int readahead; - mdp_super_t *sb = mddev->sb; struct list_head *tmp; mdk_rdev_t *rdev; @@ -1455,7 +1268,7 @@ * (we have to do this after having validated chunk_size, * because device size has to be modulo chunk_size) */ - persistent = !mddev->sb->not_persistent; + ITERATE_RDEV(mddev,rdev,tmp) { if (rdev->faulty) continue; @@ -1463,18 +1276,18 @@ MD_BUG(); continue; } - rdev->size = calc_dev_size(rdev, mddev, persistent); - if (rdev->size < sb->chunk_size / 1024) { + rdev->size = calc_dev_size(rdev->bdev, mddev); + if (rdev->size < mddev->chunk_size / 1024) { printk(KERN_WARNING "md: Dev %s smaller than chunk_size: %ldk < %dk\n", - partition_name(rdev->dev), - rdev->size, sb->chunk_size / 1024); + bdev_partition_name(rdev->bdev), + rdev->size, mddev->chunk_size / 1024); return -EINVAL; } } - switch (sb->level) { - case -4: + switch (mddev->level) { + case LEVEL_MULTIPATH: data_disks = 1; break; case -3: @@ -1483,36 +1296,36 @@ case -2: data_disks = 1; break; - case -1: + case 
LEVEL_LINEAR: zoned_raid_size(mddev); data_disks = 1; break; case 0: zoned_raid_size(mddev); - data_disks = sb->raid_disks; + data_disks = mddev->raid_disks; break; case 1: data_disks = 1; break; case 4: case 5: - data_disks = sb->raid_disks-1; + data_disks = mddev->raid_disks-1; break; default: - printk(UNKNOWN_LEVEL, mdidx(mddev), sb->level); + printk(UNKNOWN_LEVEL, mdidx(mddev), mddev->level); goto abort; } if (!md_size[mdidx(mddev)]) - md_size[mdidx(mddev)] = sb->size * data_disks; + md_size[mdidx(mddev)] = mddev->size * data_disks; readahead = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; - if (!sb->level || (sb->level == 4) || (sb->level == 5)) { - readahead = (mddev->sb->chunk_size>>PAGE_SHIFT) * 4 * data_disks; + if (!mddev->level || (mddev->level == 4) || (mddev->level == 5)) { + readahead = (mddev->chunk_size>>PAGE_SHIFT) * 4 * data_disks; if (readahead < data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2) readahead = data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2; } else { // (no multipath branch - it uses the default setting) - if (sb->level == -3) + if (mddev->level == -3) readahead = 0; } @@ -1562,13 +1375,13 @@ /* * Analyze all RAID superblock(s) */ - if (analyze_sbs(mddev)) { + if (!mddev->raid_disks && analyze_sbs(mddev)) { MD_BUG(); return -EINVAL; } - chunk_size = mddev->sb->chunk_size; - pnum = level_to_pers(mddev->sb->level); + chunk_size = mddev->chunk_size; + pnum = level_to_pers(mddev->level); if ((pnum != MULTIPATH) && (pnum != RAID1)) { if (!chunk_size) { @@ -1599,7 +1412,7 @@ } else if (chunk_size) printk(KERN_INFO "md: RAID level %d does not need chunksize! 
Continuing anyway.\n", - mddev->sb->level); + mddev->level); if (pnum >= MAX_PERSONALITY) { MD_BUG(); @@ -1633,7 +1446,8 @@ ITERATE_RDEV(mddev,rdev,tmp) { if (rdev->faulty) continue; - invalidate_device(rdev->dev, 1); + sync_blockdev(rdev->bdev); + invalidate_bdev(rdev->bdev, 0); #if 0 /* * Aside of obvious breakage (code below results in block size set @@ -1659,13 +1473,13 @@ return -EINVAL; } - mddev->in_sync = (mddev->sb->state & (1<in_sync = (mddev->state & (1<pers->sync_request) - mddev->sb->state &= ~(1 << MD_SB_CLEAN); - __md_update_sb(mddev); + mddev->state &= ~(1 << MD_SB_CLEAN); + md_update_sb(mddev); md_recover_arrays(); /* @@ -1765,16 +1579,16 @@ if (mddev->ro) mddev->ro = 0; } - if (mddev->sb) { + if (mddev->raid_disks) { /* * mark it clean only if there was no resync * interrupted. */ if (mddev->in_sync) { printk(KERN_INFO "md: marking sb clean...\n"); - mddev->sb->state |= 1 << MD_SB_CLEAN; + mddev->state |= 1 << MD_SB_CLEAN; } - __md_update_sb(mddev); + md_update_sb(mddev); } if (ro) set_device_ro(dev, 1); @@ -1821,7 +1635,7 @@ printk(KERN_INFO "md: running: "); ITERATE_RDEV(mddev,rdev,tmp) { - printk("<%s>", partition_name(rdev->dev)); + printk("<%s>", bdev_partition_name(rdev->bdev)); } printk("\n"); @@ -1838,7 +1652,7 @@ /* * lets try to run arrays based on all disks that have arrived - * until now. (those are in the ->pending list) + * until now. 
(those are in pending_raid_disks) * * the method: pick the first pending disk, collect all disks with * the same UUID, remove all from the pending list and put them into @@ -1858,21 +1672,20 @@ printk(KERN_INFO "md: autorun ...\n"); while (!list_empty(&pending_raid_disks)) { rdev0 = list_entry(pending_raid_disks.next, - mdk_rdev_t, pending); + mdk_rdev_t, same_set); - printk(KERN_INFO "md: considering %s ...\n", partition_name(rdev0->dev)); + printk(KERN_INFO "md: considering %s ...\n", bdev_partition_name(rdev0->bdev)); INIT_LIST_HEAD(&candidates); ITERATE_RDEV_PENDING(rdev,tmp) { if (uuid_equal(rdev0, rdev)) { if (!sb_equal(rdev0->sb, rdev->sb)) { printk(KERN_WARNING "md: %s has same UUID as %s, but superblocks differ ...\n", - partition_name(rdev->dev), partition_name(rdev0->dev)); + bdev_partition_name(rdev->bdev), bdev_partition_name(rdev0->bdev)); continue; } - printk(KERN_INFO "md: adding %s ...\n", partition_name(rdev->dev)); - list_del(&rdev->pending); - list_add(&rdev->pending, &candidates); + printk(KERN_INFO "md: adding %s ...\n", bdev_partition_name(rdev->bdev)); + list_move(&rdev->same_set, &candidates); } } /* @@ -1889,15 +1702,15 @@ if (mddev_lock(mddev)) printk(KERN_WARNING "md: md%d locked, cannot run\n", mdidx(mddev)); - else if (mddev->sb || !list_empty(&mddev->disks)) { + else if (mddev->raid_disks || !list_empty(&mddev->disks)) { printk(KERN_WARNING "md: md%d already running, cannot run %s\n", - mdidx(mddev), partition_name(rdev0->dev)); + mdidx(mddev), bdev_partition_name(rdev0->bdev)); mddev_unlock(mddev); } else { printk(KERN_INFO "md: created md%d\n", mdidx(mddev)); - ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) { + ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { + list_del_init(&rdev->same_set); bind_rdev_to_array(rdev, mddev); - list_del_init(&rdev->pending); } autorun_array(mddev); mddev_unlock(mddev); @@ -1905,7 +1718,7 @@ /* on success, candidates will be empty, on error * it wont... 
*/ - ITERATE_RDEV_GENERIC(candidates,pending,rdev,tmp) + ITERATE_RDEV_GENERIC(candidates,rdev,tmp) export_rdev(rdev); mddev_put(mddev); } @@ -1944,7 +1757,7 @@ #define AUTORUNNING KERN_INFO \ "md: auto-running md%d.\n" -static int autostart_array(kdev_t startdev) +static int autostart_array(dev_t startdev) { int err = -EINVAL, i; mdp_super_t *sb = NULL; @@ -1952,16 +1765,16 @@ start_rdev = md_import_device(startdev, 1); if (IS_ERR(start_rdev)) { - printk(KERN_WARNING "md: could not import %s!\n", partition_name(startdev)); + printk(KERN_WARNING "md: could not import %s!\n", partition_name(to_kdev_t(startdev))); goto abort; } if (start_rdev->faulty) { printk(KERN_WARNING "md: can not autostart based on faulty %s!\n", - partition_name(startdev)); + bdev_partition_name(start_rdev->bdev)); goto abort; } - list_add(&start_rdev->pending, &pending_raid_disks); + list_add(&start_rdev->same_set, &pending_raid_disks); sb = start_rdev->sb; @@ -1975,22 +1788,22 @@ for (i = 0; i < MD_SB_DISKS; i++) { mdp_disk_t *desc; - kdev_t dev; + dev_t dev; desc = sb->disks + i; - dev = mk_kdev(desc->major, desc->minor); + dev = MKDEV(desc->major, desc->minor); - if (kdev_none(dev)) + if (!dev) continue; - if (kdev_same(dev, startdev)) + if (dev == startdev) continue; rdev = md_import_device(dev, 1); if (IS_ERR(rdev)) { printk(KERN_WARNING "md: could not import %s, trying to run array nevertheless.\n", - partition_name(dev)); + partition_name(to_kdev_t(dev))); continue; } - list_add(&rdev->pending, &pending_raid_disks); + list_add(&rdev->same_set, &pending_raid_disks); } /* @@ -2029,36 +1842,48 @@ return 0; } -#define SET_FROM_SB(x) info.x = mddev->sb->x static int get_array_info(mddev_t * mddev, void * arg) { mdu_array_info_t info; + int nr,working,active,failed,spare; + mdk_rdev_t *rdev; + struct list_head *tmp; - if (!mddev->sb) { - MD_BUG(); - return -EINVAL; + nr=working=active=failed=spare=0; + ITERATE_RDEV(mddev,rdev,tmp) { + nr++; + if (rdev->faulty) + failed++; + else { + 
working++; + if (rdev->in_sync) + active++; + else + spare++; + } } - SET_FROM_SB(major_version); - SET_FROM_SB(minor_version); - SET_FROM_SB(patch_version); - SET_FROM_SB(ctime); - SET_FROM_SB(level); - SET_FROM_SB(size); - SET_FROM_SB(nr_disks); - SET_FROM_SB(raid_disks); - SET_FROM_SB(md_minor); - SET_FROM_SB(not_persistent); - - SET_FROM_SB(utime); - SET_FROM_SB(state); - SET_FROM_SB(active_disks); - SET_FROM_SB(working_disks); - SET_FROM_SB(failed_disks); - SET_FROM_SB(spare_disks); + info.major_version = mddev->major_version; + info.major_version = mddev->major_version; + info.minor_version = mddev->minor_version; + info.patch_version = mddev->patch_version; + info.ctime = mddev->ctime; + info.level = mddev->level; + info.size = mddev->size; + info.nr_disks = nr; + info.raid_disks = mddev->raid_disks; + info.md_minor = mddev->__minor; + info.not_persistent= !mddev->persistent; + + info.utime = mddev->utime; + info.state = mddev->state; + info.active_disks = active; + info.working_disks = working; + info.failed_disks = failed; + info.spare_disks = spare; - SET_FROM_SB(layout); - SET_FROM_SB(chunk_size); + info.layout = mddev->layout; + info.chunk_size = mddev->chunk_size; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; @@ -2067,14 +1892,12 @@ } #undef SET_FROM_SB -#define SET_FROM_SB(x) info.x = mddev->sb->disks[nr].x + static int get_disk_info(mddev_t * mddev, void * arg) { mdu_disk_info_t info; unsigned int nr; - - if (!mddev->sb) - return -EINVAL; + mdk_rdev_t *rdev; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; @@ -2083,52 +1906,55 @@ if (nr >= MD_SB_DISKS) return -EINVAL; - SET_FROM_SB(major); - SET_FROM_SB(minor); - SET_FROM_SB(raid_disk); - SET_FROM_SB(state); + rdev = find_rdev_nr(mddev, nr); + if (rdev) { + info.major = MAJOR(rdev->bdev->bd_dev); + info.minor = MINOR(rdev->bdev->bd_dev); + info.raid_disk = rdev->raid_disk; + info.state = 0; + if (rdev->faulty) + info.state |= (1<in_sync) { + info.state |= (1<sb->disks[nr].x 
= info->x static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) { - int size, persistent; + int size; mdk_rdev_t *rdev; - unsigned int nr; - kdev_t dev; - dev = mk_kdev(info->major,info->minor); - - if (find_rdev_all(dev)) { - printk(KERN_WARNING "md: device %s already used in a RAID array!\n", - partition_name(dev)); - return -EBUSY; - } - if (!mddev->sb) { + dev_t dev; + dev = MKDEV(info->major,info->minor); + if (!mddev->raid_disks) { /* expecting a device which has a superblock */ rdev = md_import_device(dev, 1); if (IS_ERR(rdev)) { printk(KERN_WARNING "md: md_import_device returned %ld\n", PTR_ERR(rdev)); - return -EINVAL; + return PTR_ERR(rdev); } if (!list_empty(&mddev->disks)) { mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, mdk_rdev_t, same_set); if (!uuid_equal(rdev0, rdev)) { printk(KERN_WARNING "md: %s has different UUID to %s\n", - partition_name(rdev->dev), partition_name(rdev0->dev)); + bdev_partition_name(rdev->bdev), bdev_partition_name(rdev0->bdev)); export_rdev(rdev); return -EINVAL; } if (!sb_equal(rdev0->sb, rdev->sb)) { printk(KERN_WARNING "md: %s has same UUID but different superblock to %s\n", - partition_name(rdev->dev), partition_name(rdev0->dev)); + bdev_partition_name(rdev->bdev), bdev_partition_name(rdev0->bdev)); export_rdev(rdev); return -EINVAL; } @@ -2137,61 +1963,45 @@ return 0; } - nr = info->number; - if (nr >= mddev->sb->nr_disks) { - MD_BUG(); - return -EINVAL; - } - - - SET_SB(number); - SET_SB(major); - SET_SB(minor); - SET_SB(raid_disk); - SET_SB(state); - if (!(info->state & (1<old_dev = dev; rdev->desc_nr = info->number; + rdev->raid_disk = info->raid_disk; + rdev->faulty = 0; + if (rdev->raid_disk < mddev->raid_disks) + rdev->in_sync = (info->state & (1<in_sync = 0; bind_rdev_to_array(rdev, mddev); - persistent = !mddev->sb->not_persistent; - if (!persistent) + if (!mddev->persistent) printk(KERN_INFO "md: nonpersistent superblock ...\n"); - size = calc_dev_size(rdev, mddev, persistent); - rdev->sb_offset = 
calc_dev_sboffset(rdev, mddev, persistent); + size = calc_dev_size(rdev->bdev, mddev); + rdev->sb_offset = calc_dev_sboffset(rdev->bdev); - if (!mddev->sb->size || (mddev->sb->size > size)) - mddev->sb->size = size; + if (!mddev->size || (mddev->size > size)) + mddev->size = size; } - /* - * sync all other superblocks with the main superblock - */ - sync_sbs(mddev); - return 0; } -#undef SET_SB -static int hot_generate_error(mddev_t * mddev, kdev_t dev) +static int hot_generate_error(mddev_t * mddev, dev_t dev) { struct request_queue *q; mdk_rdev_t *rdev; - mdp_disk_t *disk; if (!mddev->pers) return -ENODEV; printk(KERN_INFO "md: trying to generate %s error in md%d ... \n", - partition_name(dev), mdidx(mddev)); + partition_name(to_kdev_t(dev)), mdidx(mddev)); rdev = find_rdev(mddev, dev); if (!rdev) { @@ -2203,8 +2013,7 @@ MD_BUG(); return -EINVAL; } - disk = &mddev->sb->disks[rdev->desc_nr]; - if (!disk_active(disk)) + if (!rdev->in_sync) return -ENODEV; q = bdev_get_queue(rdev->bdev); @@ -2218,17 +2027,16 @@ return 0; } -static int hot_remove_disk(mddev_t * mddev, kdev_t dev) +static int hot_remove_disk(mddev_t * mddev, dev_t dev) { int err; mdk_rdev_t *rdev; - mdp_disk_t *disk; if (!mddev->pers) return -ENODEV; printk(KERN_INFO "md: trying to remove %s from md%d ... \n", - partition_name(dev), mdidx(mddev)); + partition_name(to_kdev_t(dev)), mdidx(mddev)); if (!mddev->pers->hot_remove_disk) { printk(KERN_WARNING "md%d: personality does not support diskops!\n", @@ -2240,21 +2048,10 @@ if (!rdev) return -ENXIO; - if (rdev->desc_nr == -1) { - MD_BUG(); - return -EINVAL; - } - disk = &mddev->sb->disks[rdev->desc_nr]; - if (disk_active(disk)) { - MD_BUG(); + if (rdev->in_sync && ! 
rdev->faulty) goto busy; - } - if (disk_removed(disk)) { - MD_BUG(); - return -EINVAL; - } - err = mddev->pers->hot_remove_disk(mddev, disk->number); + err = mddev->pers->hot_remove_disk(mddev, rdev->raid_disk); if (err == -EBUSY) { MD_BUG(); goto busy; @@ -2264,29 +2061,27 @@ return -EINVAL; } - remove_descriptor(disk, mddev->sb); kick_rdev_from_array(rdev); - __md_update_sb(mddev); + md_update_sb(mddev); return 0; busy: printk(KERN_WARNING "md: cannot remove active disk %s from md%d ... \n", - partition_name(dev), mdidx(mddev)); + bdev_partition_name(rdev->bdev), mdidx(mddev)); return -EBUSY; } -static int hot_add_disk(mddev_t * mddev, kdev_t dev) +static int hot_add_disk(mddev_t * mddev, dev_t dev) { - int i, err, persistent; + int i, err; unsigned int size; mdk_rdev_t *rdev; - mdp_disk_t *disk; if (!mddev->pers) return -ENODEV; printk(KERN_INFO "md: trying to hot-add %s to md%d ... \n", - partition_name(dev), mdidx(mddev)); + partition_name(to_kdev_t(dev)), mdidx(mddev)); if (!mddev->pers->hot_add_disk) { printk(KERN_WARNING "md%d: personality does not support diskops!\n", @@ -2294,50 +2089,41 @@ return -EINVAL; } - rdev = find_rdev(mddev, dev); - if (rdev) - return -EBUSY; - rdev = md_import_device (dev, 0); if (IS_ERR(rdev)) { printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); return -EINVAL; } - persistent = !mddev->sb->not_persistent; - size = calc_dev_size(rdev, mddev, persistent); - if (size < mddev->sb->size) { - printk(KERN_WARNING "md%d: disk size %d blocks < array size %d\n", - mdidx(mddev), size, mddev->sb->size); + size = calc_dev_size(rdev->bdev, mddev); + + if (size < mddev->size) { + printk(KERN_WARNING "md%d: disk size %d blocks < array size %ld\n", + mdidx(mddev), size, mddev->size); err = -ENOSPC; goto abort_export; } if (rdev->faulty) { printk(KERN_WARNING "md: can not hot-add faulty %s disk to md%d!\n", - partition_name(dev), mdidx(mddev)); + bdev_partition_name(rdev->bdev), mdidx(mddev)); err = -EINVAL; goto 
abort_export; } + rdev->in_sync = 0; bind_rdev_to_array(rdev, mddev); /* * The rest should better be atomic, we can have disk failures * noticed in interrupt contexts ... */ - rdev->old_dev = dev; rdev->size = size; - rdev->sb_offset = calc_dev_sboffset(rdev, mddev, persistent); - - disk = mddev->sb->disks + mddev->sb->raid_disks; - for (i = mddev->sb->raid_disks; i < MD_SB_DISKS; i++) { - disk = mddev->sb->disks + i; + rdev->sb_offset = calc_dev_sboffset(rdev->bdev); - if (!disk->major && !disk->minor) + for (i = mddev->raid_disks; i < MD_SB_DISKS; i++) + if (find_rdev_nr(mddev,i)==NULL) break; - if (disk_removed(disk)) - break; - } + if (i == MD_SB_DISKS) { printk(KERN_WARNING "md%d: can not hot-add to full array!\n", mdidx(mddev)); @@ -2345,35 +2131,16 @@ goto abort_unbind_export; } - if (disk_removed(disk)) { - /* - * reuse slot - */ - if (disk->number != i) { - MD_BUG(); - err = -EINVAL; - goto abort_unbind_export; - } - } else { - disk->number = i; - } - - disk->raid_disk = disk->number; - disk->major = major(dev); - disk->minor = minor(dev); + rdev->desc_nr = i; + rdev->raid_disk = i; - if (mddev->pers->hot_add_disk(mddev, disk, rdev)) { + if (mddev->pers->hot_add_disk(mddev, rdev)) { MD_BUG(); err = -EINVAL; goto abort_unbind_export; } - mark_disk_spare(disk); - mddev->sb->nr_disks++; - mddev->sb->spare_disks++; - mddev->sb->working_disks++; - - __md_update_sb(mddev); + md_update_sb(mddev); /* * Kick recovery, maybe this spare has to be added to the @@ -2391,49 +2158,37 @@ return err; } -#define SET_SB(x) mddev->sb->x = info->x static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) { - if (alloc_array_sb(mddev)) - return -ENOMEM; + mddev->major_version = MD_MAJOR_VERSION; + mddev->minor_version = MD_MINOR_VERSION; + mddev->patch_version = MD_PATCHLEVEL_VERSION; + mddev->ctime = CURRENT_TIME; + + mddev->level = info->level; + mddev->size = info->size; + mddev->raid_disks = info->raid_disks; + /* don't set __minor, it is determined by which 
/dev/md* was + * openned + */ + mddev->state = info->state; + mddev->persistent = ! info->not_persistent; - mddev->sb->major_version = MD_MAJOR_VERSION; - mddev->sb->minor_version = MD_MINOR_VERSION; - mddev->sb->patch_version = MD_PATCHLEVEL_VERSION; - mddev->sb->ctime = CURRENT_TIME; - - SET_SB(level); - SET_SB(size); - SET_SB(nr_disks); - SET_SB(raid_disks); - SET_SB(md_minor); - SET_SB(not_persistent); - - SET_SB(state); - SET_SB(active_disks); - SET_SB(working_disks); - SET_SB(failed_disks); - SET_SB(spare_disks); + mddev->layout = info->layout; + mddev->chunk_size = info->chunk_size; - SET_SB(layout); - SET_SB(chunk_size); - mddev->sb->md_magic = MD_SB_MAGIC; /* * Generate a 128 bit UUID */ - get_random_bytes(&mddev->sb->set_uuid0, 4); - get_random_bytes(&mddev->sb->set_uuid1, 4); - get_random_bytes(&mddev->sb->set_uuid2, 4); - get_random_bytes(&mddev->sb->set_uuid3, 4); + get_random_bytes(mddev->uuid, 16); return 0; } -#undef SET_SB -static int set_disk_faulty(mddev_t *mddev, kdev_t dev) +static int set_disk_faulty(mddev_t *mddev, dev_t dev) { mdk_rdev_t *rdev; int ret; @@ -2527,7 +2282,7 @@ /* START_ARRAY doesn't need to lock the array as autostart_array * does the locking, and it could even be a different array */ - err = autostart_array(val_to_kdev(arg)); + err = autostart_array(arg); if (err) { printk(KERN_WARNING "md: autostart %s failed!\n", partition_name(val_to_kdev(arg))); @@ -2553,8 +2308,8 @@ err = -EBUSY; goto abort_unlock; } - if (mddev->sb) { - printk(KERN_WARNING "md: array md%d already has a superblock!\n", + if (mddev->raid_disks) { + printk(KERN_WARNING "md: array md%d already initialised!\n", mdidx(mddev)); err = -EBUSY; goto abort_unlock; @@ -2579,8 +2334,8 @@ /* * Commands querying/configuring an existing array: */ - /* if we don't have a superblock yet, only ADD_NEW_DISK or STOP_ARRAY is allowed */ - if (!mddev->sb && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) { + /* if we are initialised yet, only ADD_NEW_DISK or 
STOP_ARRAY is allowed */ + if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) { err = -ENODEV; goto abort_unlock; } @@ -2658,18 +2413,18 @@ goto done_unlock; } case HOT_GENERATE_ERROR: - err = hot_generate_error(mddev, val_to_kdev(arg)); + err = hot_generate_error(mddev, arg); goto done_unlock; case HOT_REMOVE_DISK: - err = hot_remove_disk(mddev, val_to_kdev(arg)); + err = hot_remove_disk(mddev, arg); goto done_unlock; case HOT_ADD_DISK: - err = hot_add_disk(mddev, val_to_kdev(arg)); + err = hot_add_disk(mddev, arg); goto done_unlock; case SET_DISK_FAULTY: - err = set_disk_faulty(mddev, val_to_kdev(arg)); + err = set_disk_faulty(mddev, arg); goto done_unlock; case RUN_ARRAY: @@ -2883,10 +2638,9 @@ int md_error(mddev_t *mddev, struct block_device *bdev) { mdk_rdev_t * rrdev; - kdev_t rdev = to_kdev_t(bdev->bd_dev); dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", - MD_MAJOR,mdidx(mddev),major(rdev),minor(rdev), + MD_MAJOR,mdidx(mddev),MAJOR(bdev->bd_dev),MINOR(bdev->bd_dev), __builtin_return_address(0),__builtin_return_address(1), __builtin_return_address(2),__builtin_return_address(3)); @@ -2894,12 +2648,13 @@ MD_BUG(); return 0; } - rrdev = find_rdev(mddev, rdev); + rrdev = find_rdev_bdev(mddev, bdev); if (!rrdev || rrdev->faulty) return 0; if (!mddev->pers->error_handler || mddev->pers->error_handler(mddev,bdev) <= 0) { rrdev->faulty = 1; + rrdev->in_sync = 0; } else return 1; /* @@ -2920,15 +2675,10 @@ sz += sprintf(page + sz, "unused devices: "); - ITERATE_RDEV_ALL(rdev,tmp) { - if (list_empty(&rdev->same_set)) { - /* - * The device is not yet used by any array. 
- */ - i++; - sz += sprintf(page + sz, "%s ", - partition_name(rdev->dev)); - } + ITERATE_RDEV_PENDING(rdev,tmp) { + i++; + sz += sprintf(page + sz, "%s ", + bdev_partition_name(rdev->bdev)); } if (!i) sz += sprintf(page + sz, ""); @@ -2944,7 +2694,7 @@ unsigned long max_blocks, resync, res, dt, db, rt; resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; - max_blocks = mddev->sb->size; + max_blocks = mddev->size; /* * Should not happen. @@ -3016,7 +2766,7 @@ size = 0; ITERATE_RDEV(mddev,rdev,tmp2) { sz += sprintf(page + sz, " %s[%d]", - partition_name(rdev->dev), rdev->desc_nr); + bdev_partition_name(rdev->bdev), rdev->desc_nr); if (rdev->faulty) { sz += sprintf(page + sz, "(F)"); continue; @@ -3083,28 +2833,18 @@ return 0; } -mdp_disk_t *get_spare(mddev_t *mddev) +static mdk_rdev_t *get_spare(mddev_t *mddev) { - mdp_super_t *sb = mddev->sb; - mdp_disk_t *disk; mdk_rdev_t *rdev; struct list_head *tmp; ITERATE_RDEV(mddev,rdev,tmp) { if (rdev->faulty) continue; - if (!rdev->sb) { - MD_BUG(); + if (rdev->in_sync) continue; - } - disk = &sb->disks[rdev->desc_nr]; - if (disk_faulty(disk)) { - MD_BUG(); - continue; - } - if (disk_active(disk)) - continue; - return disk; + + return rdev; } return NULL; } @@ -3132,8 +2872,9 @@ idle = 1; ITERATE_RDEV(mddev,rdev,tmp) { - int major = major(rdev->dev); - int idx = disk_index(rdev->dev); + kdev_t dev = to_kdev_t(rdev->bdev->bd_dev); + int major = major(dev); + int idx = disk_index(dev); if ((idx >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR)) continue; @@ -3213,7 +2954,7 @@ } } while (mddev->curr_resync < 2); - max_sectors = mddev->sb->size << 1; + max_sectors = mddev->size << 1; printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev)); printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed: %d KB/sec/disc.\n", sysctl_speed_limit_min); @@ -3337,15 +3078,15 @@ void md_do_recovery(void *data) { mddev_t *mddev; - mdp_super_t *sb; struct list_head *tmp; dprintk(KERN_INFO "md: recovery thread got 
woken up ...\n"); ITERATE_MDDEV(mddev,tmp) if (mddev_lock(mddev)==0) { - sb = mddev->sb; - if (!sb || !mddev->pers || mddev->ro) + if (!mddev->raid_disks || !mddev->pers || mddev->ro) goto unlock; + if (mddev->sb_dirty) + md_update_sb(mddev); if (mddev->recovery_running > 0) /* resync/recovery still happening */ goto unlock; @@ -3369,16 +3110,12 @@ goto unlock; /* success...*/ if (mddev->spare) { - mddev->pers->spare_active(mddev, - &mddev->spare); - mark_disk_sync(mddev->spare); - mark_disk_active(mddev->spare); - sb->active_disks++; - sb->spare_disks--; + mddev->pers->spare_active(mddev); + mddev->spare->in_sync = 1; mddev->spare = NULL; } } - __md_update_sb(mddev); + md_update_sb(mddev); mddev->recovery_running = 0; wake_up(&resync_wait); goto unlock; @@ -3389,14 +3126,14 @@ wake_up(&resync_wait); } - if (sb->active_disks < sb->raid_disks) { + if (mddev->degraded) { mddev->spare = get_spare(mddev); if (!mddev->spare) printk(KERN_ERR "md%d: no spare disk to reconstruct array! " "-- continuing in degraded mode\n", mdidx(mddev)); else printk(KERN_INFO "md%d: resyncing spare disk %s to replace failed disk\n", - mdidx(mddev), partition_name(mk_kdev(mddev->spare->major,mddev->spare->minor))); + mdidx(mddev), bdev_partition_name(mddev->spare->bdev)); } if (!mddev->spare && mddev->in_sync) { /* nothing we can do ... */ @@ -3414,8 +3151,7 @@ mddev->recovery_running = 0; } else { if (mddev->spare) - mddev->pers->spare_write(mddev, - mddev->spare->number); + mddev->pers->spare_write(mddev); mddev->recovery_running = 1; md_wakeup_thread(mddev->sync_thread); } @@ -3549,10 +3285,10 @@ * Searches all registered partitions for autorun RAID arrays * at boot time. 
*/ -static kdev_t detected_devices[128]; +static dev_t detected_devices[128]; static int dev_cnt; -void md_autodetect_dev(kdev_t dev) +void md_autodetect_dev(dev_t dev) { if (dev_cnt >= 0 && dev_cnt < 127) detected_devices[dev_cnt++] = dev; @@ -3567,19 +3303,19 @@ printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); for (i = 0; i < dev_cnt; i++) { - kdev_t dev = detected_devices[i]; + dev_t dev = detected_devices[i]; rdev = md_import_device(dev,1); if (IS_ERR(rdev)) { printk(KERN_ALERT "md: could not import %s!\n", - partition_name(dev)); + partition_name(to_kdev_t(dev))); continue; } if (rdev->faulty) { MD_BUG(); continue; } - list_add(&rdev->pending, &pending_raid_disks); + list_add(&rdev->same_set, &pending_raid_disks); } dev_cnt = 0; @@ -3603,7 +3339,7 @@ * invoked program now). Added ability to initialise all * the MD devices (by specifying multiple "md=" lines) * instead of just one. -- KTK - * 18May2000: Added support for persistant-superblock arrays: + * 18May2000: Added support for persistent-superblock arrays: * md=n,0,factor,fault,device-list uses RAID0 for device n * md=n,-1,factor,fault,device-list uses LINEAR for device n * md=n,device-list reads a RAID superblock from the devices @@ -3615,7 +3351,7 @@ */ static int __init md_setup(char *str) { - int minor, level, factor, fault; + int minor, level, factor, fault, pers; char *pername = ""; char *str1 = str; @@ -3632,7 +3368,7 @@ } switch (get_option(&str, &level)) { /* RAID Personality */ case 2: /* could be 0 or -1.. 
*/ - if (!level || level == -1) { + if (level == 0 || level == LEVEL_LINEAR) { if (get_option(&str, &factor) != 2 || /* Chunk Size */ get_option(&str, &fault) != 2) { printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); @@ -3641,12 +3377,12 @@ md_setup_args.pers[minor] = level; md_setup_args.chunk[minor] = 1 << (factor+12); switch(level) { - case -1: - level = LINEAR; + case LEVEL_LINEAR: + pers = LINEAR; pername = "linear"; break; case 0: - level = RAID0; + pers = RAID0; pername = "raid0"; break; default: @@ -3655,7 +3391,7 @@ level); return 0; } - md_setup_args.pers[minor] = level; + md_setup_args.pers[minor] = pers; break; } /* FALL THROUGH */ @@ -3737,7 +3473,7 @@ continue; } - if (mddev->sb || !list_empty(&mddev->disks)) { + if (mddev->raid_disks || !list_empty(&mddev->disks)) { printk(KERN_WARNING "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n", minor); @@ -3756,10 +3492,6 @@ ainfo.not_persistent = 1; ainfo.state = (1 << MD_SB_CLEAN); - ainfo.active_disks = 0; - ainfo.working_disks = 0; - ainfo.failed_disks = 0; - ainfo.spare_disks = 0; ainfo.layout = 0; ainfo.chunk_size = md_setup_args.chunk[minor]; err = set_array_info(mddev, &ainfo); @@ -3772,10 +3504,7 @@ dinfo.state = (1<sb->nr_disks++; - mddev->sb->raid_disks++; - mddev->sb->active_disks++; - mddev->sb->working_disks++; + mddev->raid_disks++; err = add_new_disk (mddev, &dinfo); } } else { @@ -3886,10 +3615,8 @@ EXPORT_SYMBOL(md_done_sync); EXPORT_SYMBOL(md_register_thread); EXPORT_SYMBOL(md_unregister_thread); -EXPORT_SYMBOL(md_update_sb); EXPORT_SYMBOL(md_wakeup_thread); EXPORT_SYMBOL(md_print_devices); EXPORT_SYMBOL(find_rdev_nr); EXPORT_SYMBOL(md_interrupt_thread); -EXPORT_SYMBOL(get_spare); MODULE_LICENSE("GPL"); diff -Nru a/drivers/md/multipath.c b/drivers/md/multipath.c --- a/drivers/md/multipath.c Sat Jul 20 12:12:35 2002 +++ b/drivers/md/multipath.c Sat Jul 20 12:12:35 2002 @@ -55,90 +55,21 @@ static spinlock_t retry_list_lock = SPIN_LOCK_UNLOCKED; struct 
multipath_bh *multipath_retry_list = NULL, **multipath_retry_tail; -static int multipath_spare_write(mddev_t *, int); -static int multipath_spare_active(mddev_t *mddev, mdp_disk_t **d); -static struct multipath_bh *multipath_alloc_mpbh(multipath_conf_t *conf) +static void *mp_pool_alloc(int gfp_flags, void *data) { - struct multipath_bh *mp_bh = NULL; - - do { - spin_lock_irq(&conf->device_lock); - if (!conf->freer1_blocked && conf->freer1) { - mp_bh = conf->freer1; - conf->freer1 = mp_bh->next_mp; - conf->freer1_cnt--; - mp_bh->next_mp = NULL; - mp_bh->state = (1 << MPBH_PreAlloc); - } - spin_unlock_irq(&conf->device_lock); - if (mp_bh) - return mp_bh; - mp_bh = (struct multipath_bh *) kmalloc(sizeof(struct multipath_bh), - GFP_NOIO); - if (mp_bh) { - memset(mp_bh, 0, sizeof(*mp_bh)); - return mp_bh; - } - conf->freer1_blocked = 1; - wait_disk_event(conf->wait_buffer, - !conf->freer1_blocked || - conf->freer1_cnt > NR_RESERVED_BUFS/2 - ); - conf->freer1_blocked = 0; - } while (1); + struct multipath_bh *mpb; + mpb = kmalloc(sizeof(*mpb), gfp_flags); + if (mpb) + memset(mpb, 0, sizeof(*mpb)); + return mpb; } -static inline void multipath_free_mpbh(struct multipath_bh *mp_bh) +static void mp_pool_free(void *mpb, void *data) { - multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); - - if (test_bit(MPBH_PreAlloc, &mp_bh->state)) { - unsigned long flags; - mp_bh->bio = NULL; - spin_lock_irqsave(&conf->device_lock, flags); - mp_bh->next_mp = conf->freer1; - conf->freer1 = mp_bh; - conf->freer1_cnt++; - spin_unlock_irqrestore(&conf->device_lock, flags); - wake_up(&conf->wait_buffer); - } else { - kfree(mp_bh); - } + kfree(mpb); } -static int multipath_grow_mpbh (multipath_conf_t *conf, int cnt) -{ - int i = 0; - - while (i < cnt) { - struct multipath_bh *mp_bh; - mp_bh = (struct multipath_bh*)kmalloc(sizeof(*mp_bh), GFP_KERNEL); - if (!mp_bh) - break; - memset(mp_bh, 0, sizeof(*mp_bh)); - set_bit(MPBH_PreAlloc, &mp_bh->state); - mp_bh->mddev = conf->mddev; - - 
multipath_free_mpbh(mp_bh); - i++; - } - return i; -} - -static void multipath_shrink_mpbh(multipath_conf_t *conf) -{ - spin_lock_irq(&conf->device_lock); - while (conf->freer1) { - struct multipath_bh *mp_bh = conf->freer1; - conf->freer1 = mp_bh->next_mp; - conf->freer1_cnt--; - kfree(mp_bh); - } - spin_unlock_irq(&conf->device_lock); -} - - static int multipath_map (mddev_t *mddev, struct block_device **bdev) { multipath_conf_t *conf = mddev_to_conf(mddev); @@ -185,35 +116,18 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int uptodate) { struct bio *bio = mp_bh->master_bio; + multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); bio_endio(bio, uptodate); - bio_put(mp_bh->bio); - multipath_free_mpbh(mp_bh); + mempool_free(mp_bh, conf->pool); } void multipath_end_request(struct bio *bio) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); - - /* - * this branch is our 'one multipath IO has finished' event handler: - */ - if (!uptodate) - md_error (mp_bh->mddev, bio->bi_bdev); - else - /* - * Set MPBH_Uptodate in our master buffer_head, so that - * we will return a good error code for to the higher - * levels even if IO on some other multipathed buffer fails. - * - * The 'master' represents the complex operation to - * user-side. So if something waits for IO, then it will - * wait for the 'master' buffer_head. 
- */ - set_bit (MPBH_Uptodate, &mp_bh->state); - - + multipath_conf_t *conf; + struct block_device *bdev; if (uptodate) { multipath_end_bh_io(mp_bh, uptodate); return; @@ -221,8 +135,11 @@ /* * oops, IO error: */ + conf = mddev_to_conf(mp_bh->mddev); + bdev = conf->multipaths[mp_bh->path].bdev; + md_error (mp_bh->mddev, bdev); printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n", - bdev_partition_name(bio->bi_bdev), bio->bi_sector); + bdev_partition_name(bdev), bio->bi_sector); multipath_reschedule_retry(mp_bh); return; } @@ -236,7 +153,7 @@ { int disk; - for (disk = 0; disk < conf->raid_disks; disk++) + for (disk = 0; disk < MD_SB_DISKS; disk++) if (conf->multipaths[disk].operational) return disk; BUG(); @@ -247,28 +164,25 @@ { mddev_t *mddev = q->queuedata; multipath_conf_t *conf = mddev_to_conf(mddev); - struct bio *real_bio; struct multipath_bh * mp_bh; struct multipath_info *multipath; - mp_bh = multipath_alloc_mpbh (conf); + mp_bh = mempool_alloc(conf->pool, GFP_NOIO); mp_bh->master_bio = bio; mp_bh->mddev = mddev; - mp_bh->cmd = bio_data_dir(bio); /* * read balancing logic: */ - multipath = conf->multipaths + multipath_read_balance(conf); + mp_bh->path = multipath_read_balance(conf); + multipath = conf->multipaths + mp_bh->path; - real_bio = bio_clone(bio, GFP_NOIO); - real_bio->bi_bdev = multipath->bdev; - real_bio->bi_rw = bio_data_dir(bio); - real_bio->bi_end_io = multipath_end_request; - real_bio->bi_private = mp_bh; - mp_bh->bio = real_bio; - generic_make_request(real_bio); + mp_bh->bio = *bio; + mp_bh->bio.bi_bdev = multipath->bdev; + mp_bh->bio.bi_end_io = multipath_end_request; + mp_bh->bio.bi_private = mp_bh; + generic_make_request(&mp_bh->bio); return 0; } @@ -300,17 +214,9 @@ { multipath_conf_t *conf = mddev_to_conf(mddev); struct multipath_info *multipath = conf->multipaths+failed; - mdp_super_t *sb = mddev->sb; multipath->operational = 0; - mark_disk_faulty(sb->disks+multipath->number); - mark_disk_nonsync(sb->disks+multipath->number); - 
mark_disk_inactive(sb->disks+multipath->number); - sb->active_disks--; - sb->working_disks--; - sb->failed_disks++; mddev->sb_dirty = 1; - md_wakeup_thread(conf->thread); conf->working_disks--; printk (DISK_FAILED, bdev_partition_name (multipath->bdev), conf->working_disks); @@ -324,20 +230,10 @@ multipath_conf_t *conf = mddev_to_conf(mddev); struct multipath_info * multipaths = conf->multipaths; int disks = MD_SB_DISKS; - int other_paths = 1; int i; - if (conf->working_disks == 1) { - other_paths = 0; - for (i = 0; i < disks; i++) { - if (multipaths[i].spare) { - other_paths = 1; - break; - } - } - } - if (!other_paths) { + if (conf->working_disks <= 1) { /* * Uh oh, we can do nothing if this is our last path, but * first check if this is a queued request for a device @@ -348,6 +244,7 @@ return 0; } printk (LAST_DISK); + return 1; /* leave it active... it's all we have */ } else { /* * Mark disk as unusable @@ -358,24 +255,6 @@ break; } } - if (!conf->working_disks) { - int err = 1; - mdp_disk_t *spare; - mdp_super_t *sb = mddev->sb; - - spare = get_spare(mddev); - if (spare) { - err = multipath_spare_write(mddev, spare->number); - printk("got DISKOP_SPARE_WRITE err: %d. 
(spare_faulty(): %d)\n", err, disk_faulty(spare)); - } - if (!err && !disk_faulty(spare)) { - multipath_spare_active(mddev, &spare); - mark_disk_sync(spare); - mark_disk_active(spare); - sb->active_disks++; - sb->spare_disks--; - } - } } return 0; } @@ -395,213 +274,34 @@ printk("(conf==NULL)\n"); return; } - printk(" --- wd:%d rd:%d nd:%d\n", conf->working_disks, - conf->raid_disks, conf->nr_disks); + printk(" --- wd:%d rd:%d\n", conf->working_disks, + conf->raid_disks); for (i = 0; i < MD_SB_DISKS; i++) { tmp = conf->multipaths + i; - if (tmp->spare || tmp->operational || tmp->number || - tmp->raid_disk || tmp->used_slot) - printk(" disk%d, s:%d, o:%d, n:%d rd:%d us:%d dev:%s\n", - i, tmp->spare,tmp->operational, - tmp->number,tmp->raid_disk,tmp->used_slot, + if (tmp->operational || tmp->used_slot) + printk(" disk%d, o:%d, us:%d dev:%s\n", + i,tmp->operational, + tmp->used_slot, bdev_partition_name(tmp->bdev)); } } -/* - * Find the spare disk ... (can only be in the 'high' area of the array) - */ -static struct multipath_info *find_spare(mddev_t *mddev, int number) -{ - multipath_conf_t *conf = mddev->private; - int i; - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - struct multipath_info *p = conf->multipaths + i; - if (p->spare && p->number == number) - return p; - } - return NULL; -} -static int multipath_spare_inactive(mddev_t *mddev) -{ - multipath_conf_t *conf = mddev->private; - struct multipath_info *p; - int err = 0; - - print_multipath_conf(conf); - spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, mddev->spare->number); - if (p) { - p->operational = 0; - } else { - MD_BUG(); - err = 1; - } - spin_unlock_irq(&conf->device_lock); - - print_multipath_conf(conf); - return err; -} - -static int multipath_spare_write(mddev_t *mddev, int number) -{ - multipath_conf_t *conf = mddev->private; - struct multipath_info *p; - int err = 0; - - print_multipath_conf(conf); - spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, number); - if (p) { 
- p->operational = 1; - } else { - MD_BUG(); - err = 1; - } - spin_unlock_irq(&conf->device_lock); - - print_multipath_conf(conf); - return err; -} - -static int multipath_spare_active(mddev_t *mddev, mdp_disk_t **d) -{ - int err = 0; - int i, failed_disk=-1, spare_disk=-1; - multipath_conf_t *conf = mddev->private; - struct multipath_info *tmp, *sdisk, *fdisk; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *failed_desc, *spare_desc; - mdk_rdev_t *spare_rdev, *failed_rdev; - - print_multipath_conf(conf); - spin_lock_irq(&conf->device_lock); - /* - * Find the failed disk within the MULTIPATH configuration ... - * (this can only be in the first conf->working_disks part) - */ - for (i = 0; i < conf->raid_disks; i++) { - tmp = conf->multipaths + i; - if ((!tmp->operational && !tmp->spare) || - !tmp->used_slot) { - failed_disk = i; - break; - } - } - /* - * When we activate a spare disk we _must_ have a disk in - * the lower (active) part of the array to replace. - */ - if (failed_disk == -1) { - MD_BUG(); - err = 1; - goto abort; - } - /* - * Find the spare disk ... 
(can only be in the 'high' - * area of the array) - */ - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - tmp = conf->multipaths + i; - if (tmp->spare && tmp->number == (*d)->number) { - spare_disk = i; - break; - } - } - if (spare_disk == -1) { - MD_BUG(); - err = 1; - goto abort; - } - - sdisk = conf->multipaths + spare_disk; - fdisk = conf->multipaths + failed_disk; - - spare_desc = &sb->disks[sdisk->number]; - failed_desc = &sb->disks[fdisk->number]; - - if (spare_desc != *d || spare_desc->raid_disk != sdisk->raid_disk || - sdisk->raid_disk != spare_disk || fdisk->raid_disk != failed_disk || - failed_desc->raid_disk != fdisk->raid_disk) { - MD_BUG(); - err = 1; - goto abort; - } - - /* - * do the switch finally - */ - spare_rdev = find_rdev_nr(mddev, spare_desc->number); - failed_rdev = find_rdev_nr(mddev, failed_desc->number); - xchg_values(spare_rdev->desc_nr, failed_rdev->desc_nr); - spare_rdev->alias_device = 0; - failed_rdev->alias_device = 1; - - xchg_values(*spare_desc, *failed_desc); - xchg_values(*fdisk, *sdisk); - - /* - * (careful, 'failed' and 'spare' are switched from now on) - * - * we want to preserve linear numbering and we want to - * give the proper raid_disk number to the now activated - * disk. (this means we switch back these values) - */ - - xchg_values(spare_desc->raid_disk, failed_desc->raid_disk); - xchg_values(sdisk->raid_disk, fdisk->raid_disk); - xchg_values(spare_desc->number, failed_desc->number); - xchg_values(sdisk->number, fdisk->number); - - *d = failed_desc; - - if (!sdisk->bdev) - sdisk->used_slot = 0; - /* - * this really activates the spare. - */ - fdisk->spare = 0; - - /* - * if we activate a spare, we definitely replace a - * non-operational disk slot in the 'low' area of - * the disk array. 
- */ - - conf->working_disks++; -abort: - spin_unlock_irq(&conf->device_lock); - - print_multipath_conf(conf); - return err; -} - -static int multipath_add_disk(mddev_t *mddev, mdp_disk_t *added_desc, - mdk_rdev_t *rdev) +static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) { multipath_conf_t *conf = mddev->private; int err = 1; - int i; + struct multipath_info *p = conf->multipaths + rdev->raid_disk; print_multipath_conf(conf); spin_lock_irq(&conf->device_lock); - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - struct multipath_info *p = conf->multipaths + i; - if (!p->used_slot) { - if (added_desc->number != i) - break; - p->number = added_desc->number; - p->raid_disk = added_desc->raid_disk; - p->bdev = rdev->bdev; - p->operational = 0; - p->spare = 1; - p->used_slot = 1; - conf->nr_disks++; - err = 0; - break; - } + if (!p->used_slot) { + p->bdev = rdev->bdev; + p->operational = 1; + p->used_slot = 1; + conf->working_disks++; + err = 0; } if (err) MD_BUG(); @@ -615,27 +315,20 @@ { multipath_conf_t *conf = mddev->private; int err = 1; - int i; + struct multipath_info *p = conf->multipaths + number; print_multipath_conf(conf); spin_lock_irq(&conf->device_lock); - for (i = 0; i < MD_SB_DISKS; i++) { - struct multipath_info *p = conf->multipaths + i; - if (p->used_slot && (p->number == number)) { - if (p->operational) { - printk(KERN_ERR "hot-remove-disk, slot %d is identified to be the requested disk (number %d), but is still operational!\n", i, number); - err = -EBUSY; - goto abort; - } - if (p->spare && i < conf->raid_disks) - break; - p->bdev = NULL; - p->used_slot = 0; - conf->nr_disks--; - err = 0; - break; - } + if (p->used_slot) { + if (p->operational) { + printk(KERN_ERR "hot-remove-disk, slot %d is identified but is still operational!\n", number); + err = -EBUSY; + goto abort; + } + p->bdev = NULL; + p->used_slot = 0; + err = 0; } if (err) MD_BUG(); @@ -677,11 +370,8 @@ spin_unlock_irqrestore(&retry_list_lock, flags); mddev = mp_bh->mddev; 
- if (mddev->sb_dirty) { - printk(KERN_INFO "dirty sb detected, updating.\n"); - md_update_sb(mddev); - } - bio = mp_bh->bio; + bio = &mp_bh->bio; + bio->bi_sector = mp_bh->master_bio->bi_sector; bdev = bio->bi_bdev; multipath_map (mddev, &bio->bi_bdev); @@ -700,83 +390,6 @@ #undef IO_ERROR #undef REDIRECT_SECTOR -/* - * This will catch the scenario in which one of the multipaths was - * mounted as a normal device rather than as a part of a raid set. - * - * check_consistency is very personality-dependent, eg. RAID5 cannot - * do this check, it uses another method. - */ -static int __check_consistency (mddev_t *mddev, int row) -{ - multipath_conf_t *conf = mddev_to_conf(mddev); - int disks = MD_SB_DISKS; - struct block_device *bdev; - int i, rc = 0; - char *buffer; - struct page *page = NULL; - int first = 1; - int order = PAGE_CACHE_SHIFT-PAGE_SHIFT; - - buffer = (char *) __get_free_pages(GFP_KERNEL, order); - if (!buffer) - return rc; - - for (i = 0; i < disks; i++) { - struct address_space *mapping; - char *p; - if (!conf->multipaths[i].operational) - continue; - printk("(checking disk %d)\n",i); - bdev = conf->multipaths[i].bdev; - mapping = bdev->bd_inode->i_mapping; - page = read_cache_page(mapping, row/(PAGE_CACHE_SIZE/1024), - (filler_t *)mapping->a_ops->readpage, NULL); - if (IS_ERR(page)) { - page = NULL; - break; - } - wait_on_page_locked(page); - if (!PageUptodate(page)) - break; - if (PageError(page)) - break; - p = page_address(page); - if (first) { - memcpy(buffer, p, PAGE_CACHE_SIZE); - first = 0; - } else if (memcmp(buffer, p, PAGE_CACHE_SIZE)) { - rc = 1; - break; - } - page_cache_release(page); - fsync_bdev(bdev); - invalidate_bdev(bdev, 0); - page = NULL; - } - if (page) { - bdev = page->mapping->host->i_bdev; - page_cache_release(page); - fsync_bdev(bdev); - invalidate_bdev(bdev, 0); - } - free_pages((unsigned long) buffer, order); - return rc; -} - -static int check_consistency (mddev_t *mddev) -{ - if (__check_consistency(mddev, 0)) -/* - * 
we do not do this currently, as it's perfectly possible to - * have an inconsistent array when it's freshly created. Only - * newly written data has to be consistent. - */ - return 0; - - return 0; -} - #define INVALID_LEVEL KERN_WARNING \ "multipath: md%d: raid level not set to multipath IO (%d)\n" @@ -811,7 +424,7 @@ "multipath: detected IO path differences!\n" #define ARRAY_IS_ACTIVE KERN_INFO \ -"multipath: array md%d active with %d out of %d IO paths (%d spare IO paths)\n" +"multipath: array md%d active with %d out of %d IO paths\n" #define THREAD_ERROR KERN_ERR \ "multipath: couldn't allocate thread for md%d\n" @@ -819,18 +432,16 @@ static int multipath_run (mddev_t *mddev) { multipath_conf_t *conf; - int i, j, disk_idx; - struct multipath_info *disk, *disk2; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *desc, *desc2; - mdk_rdev_t *rdev, *def_rdev = NULL; + int disk_idx; + struct multipath_info *disk; + mdk_rdev_t *rdev; struct list_head *tmp; int num_rdevs = 0; MOD_INC_USE_COUNT; - if (sb->level != -4) { - printk(INVALID_LEVEL, mdidx(mddev), sb->level); + if (mddev->level != LEVEL_MULTIPATH) { + printk(INVALID_LEVEL, mdidx(mddev), mddev->level); goto out; } /* @@ -866,115 +477,38 @@ continue; } - desc = &sb->disks[rdev->desc_nr]; - disk_idx = desc->raid_disk; + disk_idx = rdev->raid_disk; disk = conf->multipaths + disk_idx; - if (!disk_sync(desc)) - printk(NOT_IN_SYNC, bdev_partition_name(rdev->bdev)); - /* - * Mark all disks as spare to start with, then pick our - * active disk. If we have a disk that is marked active - * in the sb, then use it, else use the first rdev. + * Mark all disks as active to start with, there are no + * spares. multipath_read_balance deals with choose + * the "best" operational device. 
*/ - disk->number = desc->number; - disk->raid_disk = desc->raid_disk; disk->bdev = rdev->bdev; - atomic_inc(&rdev->bdev->bd_count); - disk->operational = 0; - disk->spare = 1; - disk->used_slot = 1; - mark_disk_sync(desc); - - if (disk_active(desc)) { - if(!conf->working_disks) { - printk(OPERATIONAL, bdev_partition_name(rdev->bdev), - desc->raid_disk); - disk->operational = 1; - disk->spare = 0; - conf->working_disks++; - def_rdev = rdev; - } else { - mark_disk_spare(desc); - } - } else - mark_disk_spare(desc); - - if(!num_rdevs++) def_rdev = rdev; - } - if(!conf->working_disks && num_rdevs) { - desc = &sb->disks[def_rdev->desc_nr]; - disk = conf->multipaths + desc->raid_disk; - printk(OPERATIONAL, bdev_partition_name(def_rdev->bdev), - disk->raid_disk); disk->operational = 1; - disk->spare = 0; - conf->working_disks++; - mark_disk_active(desc); - } - /* - * Make sure our active path is in desc spot 0 - */ - if(def_rdev->desc_nr != 0) { - rdev = find_rdev_nr(mddev, 0); - desc = &sb->disks[def_rdev->desc_nr]; - desc2 = sb->disks; - disk = conf->multipaths + desc->raid_disk; - disk2 = conf->multipaths + desc2->raid_disk; - xchg_values(*desc2,*desc); - xchg_values(*disk2,*disk); - xchg_values(desc2->number, desc->number); - xchg_values(disk2->number, disk->number); - xchg_values(desc2->raid_disk, desc->raid_disk); - xchg_values(disk2->raid_disk, disk->raid_disk); - if(rdev) { - xchg_values(def_rdev->desc_nr,rdev->desc_nr); - } else { - def_rdev->desc_nr = 0; - } + disk->used_slot = 1; + num_rdevs++; } - conf->raid_disks = sb->raid_disks = sb->active_disks = 1; - conf->nr_disks = sb->nr_disks = sb->working_disks = num_rdevs; - sb->failed_disks = 0; - sb->spare_disks = num_rdevs - 1; + + conf->raid_disks = mddev->raid_disks = num_rdevs; mddev->sb_dirty = 1; conf->mddev = mddev; conf->device_lock = SPIN_LOCK_UNLOCKED; - init_waitqueue_head(&conf->wait_buffer); - if (!conf->working_disks) { printk(NONE_OPERATIONAL, mdidx(mddev)); goto out_free_conf; } - - /* 
pre-allocate some buffer_head structures. - * As a minimum, 1 mpbh and raid_disks buffer_heads - * would probably get us by in tight memory situations, - * but a few more is probably a good idea. - * For now, try NR_RESERVED_BUFS mpbh and - * NR_RESERVED_BUFS*raid_disks bufferheads - * This will allow at least NR_RESERVED_BUFS concurrent - * reads or writes even if kmalloc starts failing - */ - if (multipath_grow_mpbh(conf, NR_RESERVED_BUFS) < NR_RESERVED_BUFS) { + conf->pool = mempool_create(NR_RESERVED_BUFS, + mp_pool_alloc, mp_pool_free, + NULL); + if (conf->pool == NULL) { printk(MEM_ERROR, mdidx(mddev)); goto out_free_conf; } - if ((sb->state & (1 << MD_SB_CLEAN))) { - /* - * we do sanity checks even if the device says - * it's clean ... - */ - if (check_consistency(mddev)) { - printk(SB_DIFFERENCES); - sb->state &= ~(1 << MD_SB_CLEAN); - } - } - { const char * name = "multipathd"; @@ -985,30 +519,16 @@ } } - /* - * Regenerate the "device is in sync with the raid set" bit for - * each device. 
- */ - for (i = 0; i < MD_SB_DISKS; i++) { - mark_disk_nonsync(sb->disks+i); - for (j = 0; j < sb->raid_disks; j++) { - if (sb->disks[i].number == conf->multipaths[j].number) - mark_disk_sync(sb->disks+i); - } - } - - printk(ARRAY_IS_ACTIVE, mdidx(mddev), sb->active_disks, - sb->raid_disks, sb->spare_disks); + printk(ARRAY_IS_ACTIVE, mdidx(mddev), conf->working_disks, + mddev->raid_disks); /* * Ok, everything is just fine now */ return 0; out_free_conf: - multipath_shrink_mpbh(conf); - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->multipaths[i].bdev) - bdput(conf->multipaths[i].bdev); + if (conf->pool) + mempool_destroy(conf->pool); kfree(conf); mddev->private = NULL; out: @@ -1031,13 +551,9 @@ static int multipath_stop (mddev_t *mddev) { multipath_conf_t *conf = mddev_to_conf(mddev); - int i; md_unregister_thread(conf->thread); - multipath_shrink_mpbh(conf); - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->multipaths[i].bdev) - bdput(conf->multipaths[i].bdev); + mempool_destroy(conf->pool); kfree(conf); mddev->private = NULL; MOD_DEC_USE_COUNT; @@ -1054,9 +570,6 @@ error_handler: multipath_error, hot_add_disk: multipath_add_disk, hot_remove_disk:multipath_remove_disk, - spare_inactive: multipath_spare_inactive, - spare_active: multipath_spare_active, - spare_write: multipath_spare_write, }; static int __init multipath_init (void) diff -Nru a/drivers/md/raid0.c b/drivers/md/raid0.c --- a/drivers/md/raid0.c Sat Jul 20 12:12:35 2002 +++ b/drivers/md/raid0.c Sat Jul 20 12:12:35 2002 @@ -43,12 +43,12 @@ conf->nr_strip_zones = 0; ITERATE_RDEV(mddev,rdev1,tmp1) { - printk("raid0: looking at %s\n", partition_name(rdev1->dev)); + printk("raid0: looking at %s\n", bdev_partition_name(rdev1->bdev)); c = 0; ITERATE_RDEV(mddev,rdev2,tmp2) { printk("raid0: comparing %s(%ld) with %s(%ld)\n", - partition_name(rdev1->dev), rdev1->size, - partition_name(rdev2->dev), rdev2->size); + bdev_partition_name(rdev1->bdev), rdev1->size, + bdev_partition_name(rdev2->bdev), rdev2->size); if 
(rdev2 == rdev1) { printk("raid0: END\n"); break; @@ -89,7 +89,7 @@ ITERATE_RDEV(mddev, rdev1, tmp1) { int j = rdev1->sb->this_disk.raid_disk; - if (j < 0 || j >= mddev->sb->raid_disks) { + if (j < 0 || j >= mddev->raid_disks) { printk("raid0: bad disk number %d - aborting!\n", j); goto abort; } @@ -102,9 +102,9 @@ smallest = rdev1; cnt++; } - if (cnt != mddev->sb->raid_disks) { + if (cnt != mddev->raid_disks) { printk("raid0: too few disks (%d of %d) - aborting!\n", cnt, - mddev->sb->raid_disks); + mddev->raid_disks); goto abort; } zone->nb_dev = cnt; @@ -127,7 +127,7 @@ for (j=0; jstrip_zone[0].dev[j]; - printk("raid0: checking %s ...", partition_name(rdev->dev)); + printk("raid0: checking %s ...", bdev_partition_name(rdev->bdev)); if (rdev->size > current_offset) { printk(" contained as device %d\n", c); @@ -271,7 +271,7 @@ mdk_rdev_t *tmp_dev; unsigned long chunk, block, rsect; - chunk_size = mddev->sb->chunk_size >> 10; + chunk_size = mddev->chunk_size >> 10; chunksize_bits = ffz(~chunk_size); block = bio->bi_sector >> 1; hash = conf->hash_table + block / conf->smallest->size; @@ -351,8 +351,8 @@ for (j = 0; j < conf->nr_strip_zones; j++) { sz += sprintf(page + sz, " z%d=[", j); for (k = 0; k < conf->strip_zone[j].nb_dev; k++) - sz += sprintf (page+sz, "%s/", partition_name( - conf->strip_zone[j].dev[k]->dev)); + sz += sprintf (page+sz, "%s/", bdev_partition_name( + conf->strip_zone[j].dev[k]->bdev)); sz--; sz += sprintf (page+sz, "] zo=%d do=%d s=%d\n", conf->strip_zone[j].zone_offset, @@ -360,7 +360,7 @@ conf->strip_zone[j].size); } #endif - sz += sprintf(page + sz, " %dk chunks", mddev->sb->chunk_size/1024); + sz += sprintf(page + sz, " %dk chunks", mddev->chunk_size/1024); return sz; } diff -Nru a/drivers/md/raid1.c b/drivers/md/raid1.c --- a/drivers/md/raid1.c Sat Jul 20 12:12:34 2002 +++ b/drivers/md/raid1.c Sat Jul 20 12:12:34 2002 @@ -251,13 +251,21 @@ { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t 
*)(bio->bi_private); - int i; - + int mirror; + conf_t *conf = mddev_to_conf(r1_bio->mddev); + + if (r1_bio->cmd == READ || r1_bio->cmd == READA) + mirror = r1_bio->read_disk; + else { + for (mirror = 0; mirror < MD_SB_DISKS; mirror++) + if (r1_bio->write_bios[mirror] == bio) + break; + } /* * this branch is our 'one mirror IO has finished' event handler: */ if (!uptodate) - md_error(r1_bio->mddev, bio->bi_bdev); + md_error(r1_bio->mddev, conf->mirrors[mirror].bdev); else /* * Set R1BIO_Uptodate in our master bio, so that @@ -270,10 +278,10 @@ */ set_bit(R1BIO_Uptodate, &r1_bio->state); + update_head_pos(mirror, r1_bio); if ((r1_bio->cmd == READ) || (r1_bio->cmd == READA)) { if (!r1_bio->read_bio) BUG(); - update_head_pos(r1_bio->read_disk, r1_bio); /* * we have only one bio on the read side */ @@ -285,7 +293,7 @@ * oops, read error: */ printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n", - bdev_partition_name(bio->bi_bdev), r1_bio->sector); + bdev_partition_name(conf->mirrors[mirror].bdev), r1_bio->sector); reschedule_retry(r1_bio); return; } @@ -295,14 +303,6 @@ /* * WRITE: * - * First, find the disk this bio belongs to. - */ - for (i = 0; i < MD_SB_DISKS; i++) - if (r1_bio->write_bios[i] == bio) { - update_head_pos(i, r1_bio); - break; - } - /* * Let's see if all mirrored write operations have finished * already. 
*/ @@ -575,20 +575,13 @@ { conf_t *conf = mddev_to_conf(mddev); mirror_info_t *mirror = conf->mirrors+failed; - mdp_super_t *sb = mddev->sb; mirror->operational = 0; - mark_disk_faulty(sb->disks+mirror->number); - mark_disk_nonsync(sb->disks+mirror->number); - mark_disk_inactive(sb->disks+mirror->number); - if (!mirror->write_only) - sb->active_disks--; - sb->working_disks--; - sb->failed_disks++; - mddev->sb_dirty = 1; - md_wakeup_thread(conf->thread); - if (!mirror->write_only) + if (!mirror->write_only) { + mddev->degraded++; conf->working_disks--; + } + mddev->sb_dirty = 1; printk(DISK_FAILED, bdev_partition_name(mirror->bdev), conf->working_disks); } @@ -632,14 +625,14 @@ printk("(!conf)\n"); return; } - printk(" --- wd:%d rd:%d nd:%d\n", conf->working_disks, - conf->raid_disks, conf->nr_disks); + printk(" --- wd:%d rd:%d\n", conf->working_disks, + conf->raid_disks); for (i = 0; i < MD_SB_DISKS; i++) { tmp = conf->mirrors + i; - printk(" disk %d, s:%d, o:%d, n:%d rd:%d us:%d dev:%s\n", + printk(" disk %d, s:%d, o:%d, us:%d dev:%s\n", i, tmp->spare, tmp->operational, - tmp->number, tmp->raid_disk, tmp->used_slot, + tmp->used_slot, bdev_partition_name(tmp->bdev)); } } @@ -658,26 +651,12 @@ conf->r1buf_pool = NULL; } -static mirror_info_t *find_spare(mddev_t *mddev, int number) -{ - conf_t *conf = mddev->private; - int i; - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - mirror_info_t *p = conf->mirrors + i; - if (p->spare && p->number == number) - return p; - } - return NULL; -} - -static int raid1_spare_active(mddev_t *mddev, mdp_disk_t **d) +static int raid1_spare_active(mddev_t *mddev) { int err = 0; int i, failed_disk = -1, spare_disk = -1; conf_t *conf = mddev->private; mirror_info_t *tmp, *sdisk, *fdisk; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *failed_desc, *spare_desc; mdk_rdev_t *spare_rdev, *failed_rdev; print_conf(conf); @@ -707,48 +686,28 @@ * Find the spare disk ... 
(can only be in the 'high' * area of the array) */ - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - tmp = conf->mirrors + i; - if (tmp->spare && tmp->number == (*d)->number) { - spare_disk = i; - break; - } - } - if (spare_disk == -1) { - MD_BUG(); - err = 1; - goto abort; - } + spare_disk = mddev->spare->raid_disk; sdisk = conf->mirrors + spare_disk; fdisk = conf->mirrors + failed_disk; - spare_desc = &sb->disks[sdisk->number]; - failed_desc = &sb->disks[fdisk->number]; - - if (spare_desc != *d || spare_desc->raid_disk != sdisk->raid_disk || - sdisk->raid_disk != spare_disk || fdisk->raid_disk != failed_disk || - failed_desc->raid_disk != fdisk->raid_disk) { - MD_BUG(); - err = 1; - goto abort; - } - /* * do the switch finally */ - spare_rdev = find_rdev_nr(mddev, spare_desc->number); - failed_rdev = find_rdev_nr(mddev, failed_desc->number); + spare_rdev = find_rdev_nr(mddev, spare_disk); + failed_rdev = find_rdev_nr(mddev, failed_disk); /* * There must be a spare_rdev, but there may not be a * failed_rdev. That slot might be empty... */ - spare_rdev->desc_nr = failed_desc->number; - if (failed_rdev) - failed_rdev->desc_nr = spare_desc->number; + spare_rdev->desc_nr = failed_disk; + spare_rdev->raid_disk = failed_disk; + if (failed_rdev) { + failed_rdev->desc_nr = spare_disk; + failed_rdev->raid_disk = spare_disk; + } - xchg_values(*spare_desc, *failed_desc); xchg_values(*fdisk, *sdisk); /* @@ -758,12 +717,6 @@ * give the proper raid_disk number to the now activated * disk. 
(this means we switch back these values) */ - xchg_values(spare_desc->raid_disk, failed_desc->raid_disk); - xchg_values(sdisk->raid_disk, fdisk->raid_disk); - xchg_values(spare_desc->number, failed_desc->number); - xchg_values(sdisk->number, fdisk->number); - - *d = failed_desc; if (!sdisk->bdev) sdisk->used_slot = 0; @@ -780,6 +733,7 @@ */ conf->working_disks++; + mddev->degraded--; abort: spin_unlock_irq(&conf->device_lock); @@ -795,7 +749,7 @@ print_conf(conf); spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, mddev->spare->number); + p = conf->mirrors + mddev->spare->raid_disk; if (p) { p->operational = 0; p->write_only = 0; @@ -808,7 +762,7 @@ return err; } -static int raid1_spare_write(mddev_t *mddev, int number) +static int raid1_spare_write(mddev_t *mddev) { conf_t *conf = mddev->private; mirror_info_t *p; @@ -816,7 +770,7 @@ print_conf(conf); spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, number); + p = conf->mirrors + mddev->spare->raid_disk; if (p) { p->operational = 1; p->write_only = 1; @@ -829,36 +783,23 @@ return err; } -static int raid1_add_disk(mddev_t *mddev, mdp_disk_t *added_desc, - mdk_rdev_t *rdev) +static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) { conf_t *conf = mddev->private; int err = 1; - int i; + mirror_info_t *p = conf->mirrors + rdev->raid_disk; print_conf(conf); spin_lock_irq(&conf->device_lock); - /* - * find the disk ... 
- */ - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - mirror_info_t *p = conf->mirrors + i; - if (!p->used_slot) { - if (added_desc->number != i) - break; - p->number = added_desc->number; - p->raid_disk = added_desc->raid_disk; - /* it will be held open by rdev */ - p->bdev = rdev->bdev; - p->operational = 0; - p->write_only = 0; - p->spare = 1; - p->used_slot = 1; - p->head_position = 0; - conf->nr_disks++; - err = 0; - break; - } + if (!p->used_slot) { + /* it will be held open by rdev */ + p->bdev = rdev->bdev; + p->operational = 0; + p->write_only = 0; + p->spare = 1; + p->used_slot = 1; + p->head_position = 0; + err = 0; } if (err) MD_BUG(); @@ -872,25 +813,18 @@ { conf_t *conf = mddev->private; int err = 1; - int i; + mirror_info_t *p = conf->mirrors+ number; print_conf(conf); spin_lock_irq(&conf->device_lock); - for (i = 0; i < MD_SB_DISKS; i++) { - mirror_info_t *p = conf->mirrors + i; - if (p->used_slot && (p->number == number)) { - if (p->operational) { - err = -EBUSY; - goto abort; - } - if (p->spare && (i < conf->raid_disks)) - break; - p->bdev = NULL; - p->used_slot = 0; - conf->nr_disks--; - err = 0; - break; + if (p->used_slot) { + if (p->operational) { + err = -EBUSY; + goto abort; } + p->bdev = NULL; + p->used_slot = 0; + err = 0; } if (err) MD_BUG(); @@ -911,6 +845,7 @@ { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); + conf_t *conf = mddev_to_conf(r1_bio->mddev); if (r1_bio->read_bio != bio) BUG(); @@ -921,7 +856,8 @@ * We don't do much here, just schedule handling by raid1d */ if (!uptodate) - md_error (r1_bio->mddev, bio->bi_bdev); + md_error(r1_bio->mddev, + conf->mirrors[r1_bio->read_disk].bdev); else set_bit(R1BIO_Uptodate, &r1_bio->state); reschedule_retry(r1_bio); @@ -932,19 +868,20 @@ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); mddev_t *mddev = r1_bio->mddev; + conf_t *conf = mddev_to_conf(mddev); int i; - - if 
(!uptodate) - md_error(mddev, bio->bi_bdev); + int mirror=0; for (i = 0; i < MD_SB_DISKS; i++) if (r1_bio->write_bios[i] == bio) { - update_head_pos(i, r1_bio); + mirror = i; break; } + if (!uptodate) + md_error(mddev, conf->mirrors[mirror].bdev); + update_head_pos(mirror, r1_bio); if (atomic_dec_and_test(&r1_bio->remaining)) { - conf_t *conf = mddev_to_conf(mddev); md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, uptodate); resume_device(conf); put_buf(r1_bio); @@ -1058,10 +995,6 @@ mddev = r1_bio->mddev; conf = mddev_to_conf(mddev); - if (mddev->sb_dirty) { - printk(KERN_INFO "raid1: dirty sb detected, updating.\n"); - md_update_sb(mddev); - } bio = r1_bio->master_bio; switch(r1_bio->cmd) { case SPECIAL: @@ -1127,7 +1060,7 @@ if (init_resync(conf)) return -ENOMEM; - max_sector = mddev->sb->size << 1; + max_sector = mddev->size << 1; if (sector_nr >= max_sector) { close_sync(conf); return 0; @@ -1247,15 +1180,13 @@ conf_t *conf; int i, j, disk_idx; mirror_info_t *disk; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *descriptor; mdk_rdev_t *rdev; struct list_head *tmp; MOD_INC_USE_COUNT; - if (sb->level != 1) { - printk(INVALID_LEVEL, mdidx(mddev), sb->level); + if (mddev->level != 1) { + printk(INVALID_LEVEL, mdidx(mddev), mddev->level); goto out; } /* @@ -1293,15 +1224,11 @@ MD_BUG(); continue; } - descriptor = &sb->disks[rdev->desc_nr]; - disk_idx = descriptor->raid_disk; + disk_idx = rdev->raid_disk; disk = conf->mirrors + disk_idx; - if (disk_faulty(descriptor)) { - disk->number = descriptor->number; - disk->raid_disk = disk_idx; + if (rdev->faulty) { disk->bdev = rdev->bdev; - atomic_inc(&rdev->bdev->bd_count); disk->operational = 0; disk->write_only = 0; disk->spare = 0; @@ -1309,19 +1236,7 @@ disk->head_position = 0; continue; } - if (disk_active(descriptor)) { - if (!disk_sync(descriptor)) { - printk(NOT_IN_SYNC, - bdev_partition_name(rdev->bdev)); - continue; - } - if ((descriptor->number > MD_SB_DISKS) || - (disk_idx > sb->raid_disks)) { - - 
printk(INCONSISTENT, - bdev_partition_name(rdev->bdev)); - continue; - } + if (rdev->in_sync) { if (disk->operational) { printk(ALREADY_RUNNING, bdev_partition_name(rdev->bdev), @@ -1330,10 +1245,7 @@ } printk(OPERATIONAL, bdev_partition_name(rdev->bdev), disk_idx); - disk->number = descriptor->number; - disk->raid_disk = disk_idx; disk->bdev = rdev->bdev; - atomic_inc(&rdev->bdev->bd_count); disk->operational = 1; disk->write_only = 0; disk->spare = 0; @@ -1345,10 +1257,7 @@ * Must be a spare disk .. */ printk(SPARE, bdev_partition_name(rdev->bdev)); - disk->number = descriptor->number; - disk->raid_disk = disk_idx; disk->bdev = rdev->bdev; - atomic_inc(&rdev->bdev->bd_count); disk->operational = 0; disk->write_only = 0; disk->spare = 1; @@ -1356,8 +1265,7 @@ disk->head_position = 0; } } - conf->raid_disks = sb->raid_disks; - conf->nr_disks = sb->nr_disks; + conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; conf->device_lock = SPIN_LOCK_UNLOCKED; @@ -1370,16 +1278,12 @@ goto out_free_conf; } - for (i = 0; i < MD_SB_DISKS; i++) { + mddev->degraded = 0; + for (i = 0; i < conf->raid_disks; i++) { - descriptor = sb->disks+i; - disk_idx = descriptor->raid_disk; - disk = conf->mirrors + disk_idx; + disk = conf->mirrors + i; - if (disk_faulty(descriptor) && (disk_idx < conf->raid_disks) && - !disk->used_slot) { - disk->number = descriptor->number; - disk->raid_disk = disk_idx; + if (!disk->used_slot) { disk->bdev = NULL; disk->operational = 0; disk->write_only = 0; @@ -1387,6 +1291,8 @@ disk->used_slot = 1; disk->head_position = 0; } + if (!disk->used_slot) + mddev->degraded++; } /* @@ -1409,23 +1315,7 @@ } } - - /* - * Regenerate the "device is in sync with the raid set" bit for - * each device. 
- */ - for (i = 0; i < MD_SB_DISKS; i++) { - mark_disk_nonsync(sb->disks+i); - for (j = 0; j < sb->raid_disks; j++) { - if (!conf->mirrors[j].operational) - continue; - if (sb->disks[i].number == conf->mirrors[j].number) - mark_disk_sync(sb->disks+i); - } - } - sb->active_disks = conf->working_disks; - - printk(ARRAY_IS_ACTIVE, mdidx(mddev), sb->active_disks, sb->raid_disks); + printk(ARRAY_IS_ACTIVE, mdidx(mddev), mddev->raid_disks - mddev->degraded, mddev->raid_disks); /* * Ok, everything is just fine now */ @@ -1434,9 +1324,6 @@ out_free_conf: if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->mirrors[i].bdev) - bdput(conf->mirrors[i].bdev); kfree(conf); mddev->private = NULL; out: @@ -1447,14 +1334,10 @@ static int stop(mddev_t *mddev) { conf_t *conf = mddev_to_conf(mddev); - int i; md_unregister_thread(conf->thread); if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); - for (i = 0; i < MD_SB_DISKS; i++) - if (conf->mirrors[i].bdev) - bdput(conf->mirrors[i].bdev); kfree(conf); mddev->private = NULL; MOD_DEC_USE_COUNT; diff -Nru a/drivers/md/raid5.c b/drivers/md/raid5.c --- a/drivers/md/raid5.c Sat Jul 20 12:12:35 2002 +++ b/drivers/md/raid5.c Sat Jul 20 12:12:35 2002 @@ -371,7 +371,7 @@ set_bit(R5_UPTODATE, &sh->dev[i].flags); #endif } else { - md_error(conf->mddev, bi->bi_bdev); + md_error(conf->mddev, conf->disks[i].bdev); clear_bit(R5_UPTODATE, &sh->dev[i].flags); } #if 0 @@ -407,7 +407,7 @@ spin_lock_irqsave(&conf->device_lock, flags); if (!uptodate) - md_error(conf->mddev, bi->bi_bdev); + md_error(conf->mddev, conf->disks[i].bdev); clear_bit(R5_LOCKED, &sh->dev[i].flags); set_bit(STRIPE_HANDLE, &sh->state); @@ -442,7 +442,6 @@ static int error(mddev_t *mddev, struct block_device *bdev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; - mdp_super_t *sb = mddev->sb; struct disk_info *disk; int i; @@ -453,16 +452,10 @@ continue; if (disk->operational) { disk->operational = 0; - 
mark_disk_faulty(sb->disks+disk->number); - mark_disk_nonsync(sb->disks+disk->number); - mark_disk_inactive(sb->disks+disk->number); - sb->active_disks--; - sb->working_disks--; - sb->failed_disks++; mddev->sb_dirty = 1; + mddev->degraded++; conf->working_disks--; conf->failed_disks++; - md_wakeup_thread(conf->thread); printk (KERN_ALERT "raid5: Disk failure on %s, disabling device." " Operation continuing on %d devices\n", @@ -486,15 +479,8 @@ disk->operational = 0; disk->write_only = 0; conf->spare = NULL; - mark_disk_faulty(sb->disks+disk->number); - mark_disk_nonsync(sb->disks+disk->number); - mark_disk_inactive(sb->disks+disk->number); - sb->spare_disks--; - sb->working_disks--; - sb->failed_disks++; mddev->sb_dirty = 1; - md_wakeup_thread(conf->thread); return 0; } @@ -1304,7 +1290,7 @@ int raid_disks = conf->raid_disks; int data_disks = raid_disks-1; - if (sector_nr >= mddev->sb->size <<1) + if (sector_nr >= mddev->size <<1) /* just being told to finish up .. nothing to do */ return 0; @@ -1339,9 +1325,6 @@ PRINTK("+++ raid5d active\n"); handled = 0; - - if (mddev->sb_dirty) - md_update_sb(mddev); spin_lock_irq(&conf->device_lock); while (1) { struct list_head *first; @@ -1380,17 +1363,15 @@ static int run (mddev_t *mddev) { raid5_conf_t *conf; - int i, j, raid_disk, memory; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *desc; + int i, raid_disk, memory; mdk_rdev_t *rdev; struct disk_info *disk; struct list_head *tmp; MOD_INC_USE_COUNT; - if (sb->level != 5 && sb->level != 4) { - printk("raid5: md%d: raid level not set to 4/5 (%d)\n", mdidx(mddev), sb->level); + if (mddev->level != 5 && mddev->level != 4) { + printk("raid5: md%d: raid level not set to 4/5 (%d)\n", mdidx(mddev), mddev->level); MOD_DEC_USE_COUNT; return -EIO; } @@ -1423,18 +1404,11 @@ * the disk only to get a pointer to the descriptor on * the main superblock, which might be more recent. 
*/ - desc = sb->disks + rdev->desc_nr; - raid_disk = desc->raid_disk; + raid_disk = rdev->raid_disk; disk = conf->disks + raid_disk; - if (disk_faulty(desc)) { + if (rdev->faulty) { printk(KERN_ERR "raid5: disabled device %s (errors detected)\n", bdev_partition_name(rdev->bdev)); - if (!rdev->faulty) { - MD_BUG(); - goto abort; - } - disk->number = desc->number; - disk->raid_disk = raid_disk; disk->bdev = rdev->bdev; disk->operational = 0; @@ -1443,24 +1417,13 @@ disk->used_slot = 1; continue; } - if (disk_active(desc)) { - if (!disk_sync(desc)) { - printk(KERN_ERR "raid5: disabled device %s (not in sync)\n", bdev_partition_name(rdev->bdev)); - MD_BUG(); - goto abort; - } - if (raid_disk > sb->raid_disks) { - printk(KERN_ERR "raid5: disabled device %s (inconsistent descriptor)\n", bdev_partition_name(rdev->bdev)); - continue; - } + if (rdev->in_sync) { if (disk->operational) { printk(KERN_ERR "raid5: disabled device %s (device %d already operational)\n", bdev_partition_name(rdev->bdev), raid_disk); continue; } printk(KERN_INFO "raid5: device %s operational as raid disk %d\n", bdev_partition_name(rdev->bdev), raid_disk); - disk->number = desc->number; - disk->raid_disk = raid_disk; disk->bdev = rdev->bdev; disk->operational = 1; disk->used_slot = 1; @@ -1471,8 +1434,6 @@ * Must be a spare disk .. 
*/ printk(KERN_INFO "raid5: spare disk %s\n", bdev_partition_name(rdev->bdev)); - disk->number = desc->number; - disk->raid_disk = raid_disk; disk->bdev = rdev->bdev; disk->operational = 0; @@ -1482,16 +1443,10 @@ } } - for (i = 0; i < MD_SB_DISKS; i++) { - desc = sb->disks + i; - raid_disk = desc->raid_disk; - disk = conf->disks + raid_disk; - - if (disk_faulty(desc) && (raid_disk < sb->raid_disks) && - !conf->disks[raid_disk].used_slot) { + for (i = 0; i < conf->raid_disks; i++) { + disk = conf->disks + i; - disk->number = desc->number; - disk->raid_disk = raid_disk; + if (!disk->used_slot) { disk->bdev = NULL; disk->operational = 0; @@ -1501,15 +1456,15 @@ } } - conf->raid_disks = sb->raid_disks; + conf->raid_disks = mddev->raid_disks; /* * 0 for a fully functional array, 1 for a degraded array. */ - conf->failed_disks = conf->raid_disks - conf->working_disks; + mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks; conf->mddev = mddev; - conf->chunk_size = sb->chunk_size; - conf->level = sb->level; - conf->algorithm = sb->layout; + conf->chunk_size = mddev->chunk_size; + conf->level = mddev->level; + conf->algorithm = mddev->layout; conf->max_nr_stripes = NR_STRIPES; #if 0 @@ -1528,13 +1483,13 @@ printk(KERN_ERR "raid5: unsupported parity algorithm %d for md%d\n", conf->algorithm, mdidx(mddev)); goto abort; } - if (conf->failed_disks > 1) { + if (mddev->degraded > 1) { printk(KERN_ERR "raid5: not enough operational devices for md%d (%d/%d failed)\n", mdidx(mddev), conf->failed_disks, conf->raid_disks); goto abort; } - if (conf->failed_disks == 1 && - !(sb->state & (1<degraded == 1 && + !(mddev->state & (1<disks + i); - for (j = 0; j < sb->raid_disks; j++) { - if (!conf->disks[j].operational) - continue; - if (sb->disks[i].number == conf->disks[j].number) - mark_disk_sync(sb->disks + i); - } - } - sb->active_disks = conf->working_disks; - - if (sb->active_disks == sb->raid_disks) - printk("raid5: raid level %d set md%d active with %d out 
of %d devices, algorithm %d\n", conf->level, mdidx(mddev), sb->active_disks, sb->raid_disks, conf->algorithm); + if (mddev->degraded == 0) + printk("raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), + mddev->raid_disks-mddev->degraded, mddev->raid_disks, conf->algorithm); else - printk(KERN_ALERT "raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), sb->active_disks, sb->raid_disks, conf->algorithm); + printk(KERN_ALERT "raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), + mddev->raid_disks = mddev->degraded, mddev->raid_disks, conf->algorithm); print_raid5_conf(conf); @@ -1649,10 +1591,9 @@ static int status (char *page, mddev_t *mddev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; - mdp_super_t *sb = mddev->sb; int sz = 0, i; - sz += sprintf (page+sz, " level %d, %dk chunk, algorithm %d", sb->level, sb->chunk_size >> 10, sb->layout); + sz += sprintf (page+sz, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); sz += sprintf (page+sz, " [%d/%d] [", conf->raid_disks, conf->working_disks); for (i = 0; i < conf->raid_disks; i++) sz += sprintf (page+sz, "%s", conf->disks[i].operational ? 
"U" : "_"); @@ -1684,33 +1625,19 @@ for (i = 0; i < conf->working_disks+conf->failed_disks; i++) { #endif tmp = conf->disks + i; - printk(" disk %d, s:%d, o:%d, n:%d rd:%d us:%d dev:%s\n", + printk(" disk %d, s:%d, o:%d, us:%d dev:%s\n", i, tmp->spare,tmp->operational, - tmp->number,tmp->raid_disk,tmp->used_slot, + tmp->used_slot, bdev_partition_name(tmp->bdev)); } } -static struct disk_info *find_spare(mddev_t *mddev, int number) -{ - raid5_conf_t *conf = mddev->private; - int i; - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - struct disk_info *p = conf->disks + i; - if (p->spare && p->number == number) - return p; - } - return NULL; -} - -static int raid5_spare_active(mddev_t *mddev, mdp_disk_t **d) +static int raid5_spare_active(mddev_t *mddev) { int err = 0; int i, failed_disk=-1, spare_disk=-1; raid5_conf_t *conf = mddev->private; struct disk_info *tmp, *sdisk, *fdisk; - mdp_super_t *sb = mddev->sb; - mdp_disk_t *failed_desc, *spare_desc; mdk_rdev_t *spare_rdev, *failed_rdev; print_raid5_conf(conf); @@ -1732,18 +1659,7 @@ * Find the spare disk ... 
(can only be in the 'high' * area of the array) */ - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - tmp = conf->disks + i; - if (tmp->spare && tmp->number == (*d)->number) { - spare_disk = i; - break; - } - } - if (spare_disk == -1) { - MD_BUG(); - err = 1; - goto abort; - } + spare_disk = mddev->spare->raid_disk; if (!conf->spare) { MD_BUG(); @@ -1753,31 +1669,22 @@ sdisk = conf->disks + spare_disk; fdisk = conf->disks + failed_disk; - spare_desc = &sb->disks[sdisk->number]; - failed_desc = &sb->disks[fdisk->number]; - - if (spare_desc != *d || spare_desc->raid_disk != sdisk->raid_disk || - sdisk->raid_disk != spare_disk || fdisk->raid_disk != failed_disk || - failed_desc->raid_disk != fdisk->raid_disk) { - MD_BUG(); - err = 1; - goto abort; - } - /* * do the switch finally */ - spare_rdev = find_rdev_nr(mddev, spare_desc->number); - failed_rdev = find_rdev_nr(mddev, failed_desc->number); + spare_rdev = find_rdev_nr(mddev, spare_disk); + failed_rdev = find_rdev_nr(mddev, failed_disk); /* There must be a spare_rdev, but there may not be a * failed_rdev. That slot might be empty... */ - spare_rdev->desc_nr = failed_desc->number; - if (failed_rdev) - failed_rdev->desc_nr = spare_desc->number; + spare_rdev->desc_nr = failed_disk; + spare_rdev->raid_disk = failed_disk; + if (failed_rdev) { + failed_rdev->desc_nr = spare_disk; + failed_rdev->raid_disk = spare_disk; + } - xchg_values(*spare_desc, *failed_desc); xchg_values(*fdisk, *sdisk); /* @@ -1788,13 +1695,6 @@ * disk. (this means we switch back these values) */ - xchg_values(spare_desc->raid_disk, failed_desc->raid_disk); - xchg_values(sdisk->raid_disk, fdisk->raid_disk); - xchg_values(spare_desc->number, failed_desc->number); - xchg_values(sdisk->number, fdisk->number); - - *d = failed_desc; - if (!sdisk->bdev) sdisk->used_slot = 0; @@ -1809,6 +1709,7 @@ * non-operational disk slot in the 'low' area of * the disk array. 
*/ + mddev->degraded--; conf->failed_disks--; conf->working_disks++; conf->spare = NULL; @@ -1826,7 +1727,7 @@ print_raid5_conf(conf); spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, mddev->spare->number); + p = conf->disks + mddev->spare->raid_disk; if (p) { p->operational = 0; p->write_only = 0; @@ -1841,7 +1742,7 @@ return err; } -static int raid5_spare_write(mddev_t *mddev, int number) +static int raid5_spare_write(mddev_t *mddev) { raid5_conf_t *conf = mddev->private; struct disk_info *p; @@ -1849,7 +1750,7 @@ print_raid5_conf(conf); spin_lock_irq(&conf->device_lock); - p = find_spare(mddev, number); + p = conf->disks + mddev->spare->raid_disk; if (p && !conf->spare) { p->operational = 1; p->write_only = 1; @@ -1867,25 +1768,19 @@ { raid5_conf_t *conf = mddev->private; int err = 1; - int i; + struct disk_info *p = conf->disks + number; print_raid5_conf(conf); spin_lock_irq(&conf->device_lock); - for (i = 0; i < MD_SB_DISKS; i++) { - struct disk_info *p = conf->disks + i; - if (p->used_slot && p->number == number) { - if (p->operational) { - err = -EBUSY; - goto abort; - } - if (p->spare && i < conf->raid_disks) - break; - p->bdev = NULL; - p->used_slot = 0; - err = 0; - break; + if (p->used_slot) { + if (p->operational) { + err = -EBUSY; + goto abort; } + p->bdev = NULL; + p->used_slot = 0; + err = 0; } if (err) MD_BUG(); @@ -1895,12 +1790,11 @@ return err; } -static int raid5_add_disk(mddev_t *mddev, mdp_disk_t *added_desc, - mdk_rdev_t *rdev) +static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) { raid5_conf_t *conf = mddev->private; int err = 1; - int i; + struct disk_info *p = conf->disks + rdev->raid_disk; print_raid5_conf(conf); spin_lock_irq(&conf->device_lock); @@ -1908,22 +1802,14 @@ * find the disk ... 
*/ - for (i = conf->raid_disks; i < MD_SB_DISKS; i++) { - struct disk_info *p = conf->disks + i; - if (!p->used_slot) { - if (added_desc->number != i) - break; - p->number = added_desc->number; - p->raid_disk = added_desc->raid_disk; - /* it will be held open by rdev */ - p->bdev = rdev->bdev; - p->operational = 0; - p->write_only = 0; - p->spare = 1; - p->used_slot = 1; - err = 0; - break; - } + if (!p->used_slot) { + /* it will be held open by rdev */ + p->bdev = rdev->bdev; + p->operational = 0; + p->write_only = 0; + p->spare = 1; + p->used_slot = 1; + err = 0; } if (err) MD_BUG(); diff -Nru a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c --- a/drivers/scsi/ide-scsi.c Sat Jul 20 12:12:34 2002 +++ b/drivers/scsi/ide-scsi.c Sat Jul 20 12:12:34 2002 @@ -244,7 +244,7 @@ u8 *scsi_buf; if (!(rq->flags & REQ_PC)) { - __ata_end_request(drive, rq, uptodate, 0); + ata_end_request(drive, rq, uptodate, 0); return 0; } @@ -318,10 +318,11 @@ if (ata_status(drive, 0, DRQ_STAT)) { /* No more interrupts */ if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); - ide__sti(); + local_irq_enable(); if (drive->status & ERR_STAT) rq->errors++; idescsi_end_request(drive, rq, 1); + return ATA_OP_FINISHED; } bcount = IN_BYTE (IDE_BCOUNTH_REG) << 8 | IN_BYTE (IDE_BCOUNTL_REG); @@ -491,14 +492,13 @@ MOD_DEC_USE_COUNT; } +static Scsi_Host_Template template; static int idescsi_cleanup (struct ata_device *drive) { - struct Scsi_Host *host = drive->driver_data; - if (ide_unregister_subdriver (drive)) { return 1; } - scsi_unregister(host); + scsi_unregister_host(&template); return 0; } @@ -801,7 +801,6 @@ static void __exit exit_idescsi_module(void) { unregister_ata_driver(&ata_ops); - scsi_unregister_host(&template); } module_init(init_idescsi_module); diff -Nru a/drivers/usb/class/audio.c b/drivers/usb/class/audio.c --- a/drivers/usb/class/audio.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/class/audio.c 
Sat Jul 20 12:12:35 2002 @@ -2091,11 +2091,11 @@ } static /*const*/ struct file_operations usb_mixer_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - ioctl: usb_audio_ioctl_mixdev, - open: usb_audio_open_mixdev, - release: usb_audio_release_mixdev, + .owner = THIS_MODULE, + .llseek = no_llseek, + .ioctl = usb_audio_ioctl_mixdev, + .open = usb_audio_open_mixdev, + .release = usb_audio_release_mixdev, }; /* --------------------------------------------------------------------- */ @@ -2727,15 +2727,15 @@ } static /*const*/ struct file_operations usb_audio_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - read: usb_audio_read, - write: usb_audio_write, - poll: usb_audio_poll, - ioctl: usb_audio_ioctl, - mmap: usb_audio_mmap, - open: usb_audio_open, - release: usb_audio_release, + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = usb_audio_read, + .write = usb_audio_write, + .poll = usb_audio_poll, + .ioctl = usb_audio_ioctl, + .mmap = usb_audio_mmap, + .open = usb_audio_open, + .release = usb_audio_release, }; /* --------------------------------------------------------------------- */ @@ -2753,11 +2753,11 @@ MODULE_DEVICE_TABLE (usb, usb_audio_ids); static struct usb_driver usb_audio_driver = { - name: "audio", - probe: usb_audio_probe, - disconnect: usb_audio_disconnect, - driver_list: LIST_HEAD_INIT(usb_audio_driver.driver_list), - id_table: usb_audio_ids, + .name = "audio", + .probe = usb_audio_probe, + .disconnect = usb_audio_disconnect, + .driver_list = LIST_HEAD_INIT(usb_audio_driver.driver_list), + .id_table = usb_audio_ids, }; static void *find_descriptor(void *descstart, unsigned int desclen, void *after, diff -Nru a/drivers/usb/class/bluetty.c b/drivers/usb/class/bluetty.c --- a/drivers/usb/class/bluetty.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/class/bluetty.c Sat Jul 20 12:12:35 2002 @@ -234,10 +234,10 @@ MODULE_DEVICE_TABLE (usb, usb_bluetooth_ids); static struct usb_driver usb_bluetooth_driver = { - name: "bluetty", - probe: 
usb_bluetooth_probe, - disconnect: usb_bluetooth_disconnect, - id_table: usb_bluetooth_ids, + .name = "bluetty", + .probe = usb_bluetooth_probe, + .disconnect = usb_bluetooth_disconnect, + .id_table = usb_bluetooth_ids, }; static int bluetooth_refcount; @@ -1284,30 +1284,30 @@ static struct tty_driver bluetooth_tty_driver = { - magic: TTY_DRIVER_MAGIC, - driver_name: "usb-bluetooth", - name: "usb/ttub/%d", - major: BLUETOOTH_TTY_MAJOR, - minor_start: 0, - num: BLUETOOTH_TTY_MINORS, - type: TTY_DRIVER_TYPE_SERIAL, - subtype: SERIAL_TYPE_NORMAL, - flags: TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, - - refcount: &bluetooth_refcount, - table: bluetooth_tty, - termios: bluetooth_termios, - termios_locked: bluetooth_termios_locked, - - open: bluetooth_open, - close: bluetooth_close, - write: bluetooth_write, - write_room: bluetooth_write_room, - ioctl: bluetooth_ioctl, - set_termios: bluetooth_set_termios, - throttle: bluetooth_throttle, - unthrottle: bluetooth_unthrottle, - chars_in_buffer: bluetooth_chars_in_buffer, + .magic = TTY_DRIVER_MAGIC, + .driver_name = "usb-bluetooth", + .name = "usb/ttub/%d", + .major = BLUETOOTH_TTY_MAJOR, + .minor_start = 0, + .num = BLUETOOTH_TTY_MINORS, + .type = TTY_DRIVER_TYPE_SERIAL, + .subtype = SERIAL_TYPE_NORMAL, + .flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, + + .refcount = &bluetooth_refcount, + .table = bluetooth_tty, + .termios = bluetooth_termios, + .termios_locked = bluetooth_termios_locked, + + .open = bluetooth_open, + .close = bluetooth_close, + .write = bluetooth_write, + .write_room = bluetooth_write_room, + .ioctl = bluetooth_ioctl, + .set_termios = bluetooth_set_termios, + .throttle = bluetooth_throttle, + .unthrottle = bluetooth_unthrottle, + .chars_in_buffer = bluetooth_chars_in_buffer, }; diff -Nru a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c --- a/drivers/usb/class/cdc-acm.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/class/cdc-acm.c Sat Jul 20 12:12:34 2002 @@ -683,10 +683,10 @@ 
MODULE_DEVICE_TABLE (usb, acm_ids); static struct usb_driver acm_driver = { - name: "acm", - probe: acm_probe, - disconnect: acm_disconnect, - id_table: acm_ids, + .name = "acm", + .probe = acm_probe, + .disconnect = acm_disconnect, + .id_table = acm_ids, }; /* @@ -700,32 +700,32 @@ static struct termios *acm_tty_termios_locked[ACM_TTY_MINORS]; static struct tty_driver acm_tty_driver = { - magic: TTY_DRIVER_MAGIC, - driver_name: "acm", - name: "usb/acm/%d", - major: ACM_TTY_MAJOR, - minor_start: 0, - num: ACM_TTY_MINORS, - type: TTY_DRIVER_TYPE_SERIAL, - subtype: SERIAL_TYPE_NORMAL, - flags: TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, - - refcount: &acm_tty_refcount, - - table: acm_tty_table, - termios: acm_tty_termios, - termios_locked: acm_tty_termios_locked, - - open: acm_tty_open, - close: acm_tty_close, - write: acm_tty_write, - write_room: acm_tty_write_room, - ioctl: acm_tty_ioctl, - throttle: acm_tty_throttle, - unthrottle: acm_tty_unthrottle, - chars_in_buffer: acm_tty_chars_in_buffer, - break_ctl: acm_tty_break_ctl, - set_termios: acm_tty_set_termios + .magic = TTY_DRIVER_MAGIC, + .driver_name = "acm", + .name = "usb/acm/%d", + .major = ACM_TTY_MAJOR, + .minor_start = 0, + .num = ACM_TTY_MINORS, + .type = TTY_DRIVER_TYPE_SERIAL, + .subtype = SERIAL_TYPE_NORMAL, + .flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, + + .refcount = &acm_tty_refcount, + + .table = acm_tty_table, + .termios = acm_tty_termios, + .termios_locked = acm_tty_termios_locked, + + .open = acm_tty_open, + .close = acm_tty_close, + .write = acm_tty_write, + .write_room = acm_tty_write_room, + .ioctl = acm_tty_ioctl, + .throttle = acm_tty_throttle, + .unthrottle = acm_tty_unthrottle, + .chars_in_buffer = acm_tty_chars_in_buffer, + .break_ctl = acm_tty_break_ctl, + .set_termios = acm_tty_set_termios }; /* diff -Nru a/drivers/usb/class/printer.c b/drivers/usb/class/printer.c --- a/drivers/usb/class/printer.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/class/printer.c Sat Jul 20 12:12:35 
2002 @@ -655,7 +655,10 @@ (count - writecount) : USBLP_BUF_SIZE; if (copy_from_user(usblp->writeurb->transfer_buffer, buffer + writecount, - usblp->writeurb->transfer_buffer_length)) return -EFAULT; + usblp->writeurb->transfer_buffer_length)) { + up(&usblp->sem); + return writecount ? writecount : -EFAULT; + } usblp->writeurb->dev = usblp->dev; usblp->wcomplete = 0; @@ -783,13 +786,13 @@ } static struct file_operations usblp_fops = { - owner: THIS_MODULE, - read: usblp_read, - write: usblp_write, - poll: usblp_poll, - ioctl: usblp_ioctl, - open: usblp_open, - release: usblp_release, + .owner = THIS_MODULE, + .read = usblp_read, + .write = usblp_write, + .poll = usblp_poll, + .ioctl = usblp_ioctl, + .open = usblp_open, + .release = usblp_release, }; static void *usblp_probe(struct usb_device *dev, unsigned int ifnum, @@ -1097,11 +1100,11 @@ MODULE_DEVICE_TABLE (usb, usblp_ids); static struct usb_driver usblp_driver = { - owner: THIS_MODULE, - name: "usblp", - probe: usblp_probe, - disconnect: usblp_disconnect, - id_table: usblp_ids, + .owner = THIS_MODULE, + .name = "usblp", + .probe = usblp_probe, + .disconnect = usblp_disconnect, + .id_table = usblp_ids, }; static int __init usblp_init(void) diff -Nru a/drivers/usb/class/usb-midi.c b/drivers/usb/class/usb-midi.c --- a/drivers/usb/class/usb-midi.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/class/usb-midi.c Sat Jul 20 12:12:34 2002 @@ -988,12 +988,12 @@ } static struct file_operations usb_midi_fops = { - llseek: usb_midi_llseek, - read: usb_midi_read, - write: usb_midi_write, - poll: usb_midi_poll, - open: usb_midi_open, - release: usb_midi_release, + .llseek = usb_midi_llseek, + .read = usb_midi_read, + .write = usb_midi_write, + .poll = usb_midi_poll, + .open = usb_midi_open, + .release = usb_midi_release, }; /* ------------------------------------------------------------------------- */ @@ -2095,11 +2095,11 @@ static struct usb_driver usb_midi_driver = { - name: "midi", - probe: usb_midi_probe, - disconnect: 
usb_midi_disconnect, - id_table: NULL, /* check all devices */ - driver_list: LIST_HEAD_INIT(usb_midi_driver.driver_list) + .name = "midi", + .probe = usb_midi_probe, + .disconnect = usb_midi_disconnect, + .id_table = NULL, /* check all devices */ + .driver_list = LIST_HEAD_INIT(usb_midi_driver.driver_list) }; /* ------------------------------------------------------------------------- */ @@ -2168,15 +2168,15 @@ static snd_rawmidi_ops_t snd_usbmidi_output = { - open: snd_usbmidi_output_open, - close: snd_usbmidi_output_close, - trigger: snd_usbmidi_output_trigger, + .open = snd_usbmidi_output_open, + .close = snd_usbmidi_output_close, + .trigger = snd_usbmidi_output_trigger, }; static snd_rawmidi_ops_t snd_usbmidi_input = { - open: snd_usbmidi_input_open, - close: snd_usbmidi_input_close, - trigger: snd_usbmidi_input_trigger, + .open = snd_usbmidi_input_open, + .close = snd_usbmidi_input_close, + .trigger = snd_usbmidi_input_trigger, }; int snd_usbmidi_midi(cs46xx_t *chip, int device, snd_rawmidi_t **rrawmidi) @@ -2211,7 +2211,7 @@ int err, idx; snd_region_t *region; static snd_device_opt_t ops = { - dev_free: snd_usbmidi_dev_free, + .dev_free = snd_usbmidi_dev_free, }; *rchip = NULL; diff -Nru a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c --- a/drivers/usb/core/devices.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/devices.c Sat Jul 20 12:12:35 2002 @@ -152,8 +152,8 @@ void usbdevfs_conn_disc_event(void) { - wake_up(&deviceconndiscwq); conndiscevcnt++; + wake_up(&deviceconndiscwq); } static const char *class_decode(const int class) @@ -239,6 +239,7 @@ if (start > end) return start; + lock_kernel(); /* driver might be unloaded */ start += sprintf(start, format_iface, desc->bInterfaceNumber, desc->bAlternateSetting, @@ -248,6 +249,7 @@ desc->bInterfaceSubClass, desc->bInterfaceProtocol, iface->driver ? 
iface->driver->name : "(none)"); + unlock_kernel(); return start; } @@ -597,6 +599,13 @@ unlock_kernel(); return POLLIN; } + + /* we may have dropped BKL - need to check for having lost the race */ + if (file->private_data) { + kfree(st); + goto lost_race; + } + /* * need to prevent the module from being unloaded, since * proc_unregister does not call the release method and @@ -606,6 +615,7 @@ file->private_data = st; mask = POLLIN; } +lost_race: if (file->f_mode & FMODE_READ) poll_wait(file, &deviceconndiscwq, wait); if (st->lastev != conndiscevcnt) @@ -656,9 +666,9 @@ } struct file_operations usbdevfs_devices_fops = { - llseek: usb_device_lseek, - read: usb_device_read, - poll: usb_device_poll, - open: usb_device_open, - release: usb_device_release, + .llseek = usb_device_lseek, + .read = usb_device_read, + .poll = usb_device_poll, + .open = usb_device_open, + .release = usb_device_release, }; diff -Nru a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c --- a/drivers/usb/core/devio.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/devio.c Sat Jul 20 12:12:35 2002 @@ -324,9 +324,9 @@ } struct usb_driver usbdevfs_driver = { - name: "usbfs", - probe: driver_probe, - disconnect: driver_disconnect, + .name = "usbfs", + .probe = driver_probe, + .disconnect = driver_disconnect, }; static int claimintf(struct dev_state *ps, unsigned int intf) @@ -361,14 +361,14 @@ if (intf >= 8*sizeof(ps->ifclaimed)) return -EINVAL; err = -EINVAL; - lock_kernel(); dev = ps->dev; + down(&dev->serialize); if (dev && test_and_clear_bit(intf, &ps->ifclaimed)) { iface = &dev->actconfig->interface[intf]; usb_driver_release_interface(&usbdevfs_driver, iface); err = 0; } - unlock_kernel(); + up(&dev->serialize); return err; } @@ -722,14 +722,11 @@ if (test_bit(i, &ps->ifclaimed)) continue; - if (intf->driver) { - const struct usb_device_id *id; - down(&intf->driver->serialize); - intf->driver->disconnect(ps->dev, intf->private_data); - id = usb_match_id(ps->dev,intf,intf->driver->id_table); - 
intf->driver->probe(ps->dev, i, id); - up(&intf->driver->serialize); + lock_kernel(); + if (intf->driver && ps->dev) { + usb_bind_driver(intf->driver,ps->dev, i); } + unlock_kernel(); } return 0; @@ -1092,16 +1089,17 @@ /* disconnect kernel driver from interface, leaving it unbound. */ case USBDEVFS_DISCONNECT: + /* this function is voodoo. without locking it is a maybe thing */ + lock_kernel(); driver = ifp->driver; if (driver) { - down (&driver->serialize); dbg ("disconnect '%s' from dev %d interface %d", driver->name, ps->dev->devnum, ctrl.ifno); - driver->disconnect (ps->dev, ifp->private_data); + usb_unbind_driver(ps->dev, ifp); usb_driver_release_interface (driver, ifp); - up (&driver->serialize); } else retval = -EINVAL; + unlock_kernel(); break; /* let kernel drivers try to (re)bind to the interface */ @@ -1111,18 +1109,28 @@ /* talk directly to the interface's driver */ default: + lock_kernel(); /* against module unload */ driver = ifp->driver; - if (driver == 0 || driver->ioctl == 0) - retval = -ENOSYS; - else { - if (ifp->driver->owner) + if (driver == 0 || driver->ioctl == 0) { + unlock_kernel(); + retval = -ENOSYS; + } else { + if (ifp->driver->owner) { __MOD_INC_USE_COUNT(ifp->driver->owner); + unlock_kernel(); + } /* ifno might usefully be passed ... */ retval = driver->ioctl (ps->dev, ctrl.ioctl_code, buf); /* size = min_t(int, size, retval)? 
*/ - if (ifp->driver->owner) + if (ifp->driver->owner) { __MOD_DEC_USE_COUNT(ifp->driver->owner); + } else { + unlock_kernel(); + } } + + if (retval == -ENOIOCTLCMD) + retval = -ENOTTY; } /* cleanup and return */ @@ -1139,7 +1147,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { struct dev_state *ps = (struct dev_state *)file->private_data; - int ret = -ENOIOCTLCMD; + int ret = -ENOTTY; if (!(file->f_mode & FMODE_WRITE)) return -EPERM; @@ -1248,10 +1256,10 @@ } struct file_operations usbdevfs_device_file_operations = { - llseek: usbdev_lseek, - read: usbdev_read, - poll: usbdev_poll, - ioctl: usbdev_ioctl, - open: usbdev_open, - release: usbdev_release, + .llseek = usbdev_lseek, + .read = usbdev_read, + .poll = usbdev_poll, + .ioctl = usbdev_ioctl, + .open = usbdev_open, + .release = usbdev_release, }; diff -Nru a/drivers/usb/core/drivers.c b/drivers/usb/core/drivers.c --- a/drivers/usb/core/drivers.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/core/drivers.c Sat Jul 20 12:12:34 2002 @@ -66,6 +66,7 @@ start = page; end = page + (PAGE_SIZE - 100); pos = *ppos; + lock_kernel(); /* else drivers might be unloaded */ for (; tmp != &usb_driver_list; tmp = tmp->next) { struct usb_driver *driver = list_entry(tmp, struct usb_driver, driver_list); int minor = driver->fops ? 
driver->minor : -1; @@ -80,6 +81,7 @@ break; } } + unlock_kernel(); if (start == page) start += sprintf(start, "(none)\n"); len = start - page; @@ -120,6 +122,6 @@ } struct file_operations usbdevfs_drivers_fops = { - llseek: usb_driver_lseek, - read: usb_driver_read, + .llseek = usb_driver_lseek, + .read = usb_driver_read, }; diff -Nru a/drivers/usb/core/file.c b/drivers/usb/core/file.c --- a/drivers/usb/core/file.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/file.c Sat Jul 20 12:12:35 2002 @@ -44,10 +44,13 @@ spin_lock (&minor_lock); c = usb_minors[minor]; - spin_unlock (&minor_lock); - if (!c || !(new_fops = fops_get(c))) + if (!c || !(new_fops = fops_get(c))) { + spin_unlock(&minor_lock); return err; + } + spin_unlock(&minor_lock); + old_fops = file->f_op; file->f_op = new_fops; /* Curiouser and curiouser... NULL ->open() as "no device" ? */ @@ -62,8 +65,8 @@ } static struct file_operations usb_fops = { - owner: THIS_MODULE, - open: usb_open, + .owner = THIS_MODULE, + .open = usb_open, }; int usb_major_init(void) diff -Nru a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c --- a/drivers/usb/core/hcd.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/hcd.c Sat Jul 20 12:12:35 2002 @@ -1240,11 +1240,11 @@ * bus glue for non-PCI system busses will need to use this. 
*/ struct usb_operations usb_hcd_operations = { - allocate: hcd_alloc_dev, - get_frame_number: hcd_get_frame_number, - submit_urb: hcd_submit_urb, - unlink_urb: hcd_unlink_urb, - deallocate: hcd_free_dev, + .allocate = hcd_alloc_dev, + .get_frame_number = hcd_get_frame_number, + .submit_urb = hcd_submit_urb, + .unlink_urb = hcd_unlink_urb, + .deallocate = hcd_free_dev, }; EXPORT_SYMBOL (usb_hcd_operations); diff -Nru a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c --- a/drivers/usb/core/hub.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/hub.c Sat Jul 20 12:12:35 2002 @@ -1046,8 +1046,6 @@ static int usb_hub_thread(void *__hub) { - lock_kernel(); - /* * This thread doesn't need any user-level access, * so get rid of all our resources @@ -1067,8 +1065,6 @@ } while (!signal_pending(current)); dbg("usb_hub_thread exiting"); - - unlock_kernel(); complete_and_exit(&khubd_exited, 0); } @@ -1083,11 +1079,11 @@ MODULE_DEVICE_TABLE (usb, hub_id_table); static struct usb_driver hub_driver = { - name: "hub", - probe: hub_probe, - ioctl: hub_ioctl, - disconnect: hub_disconnect, - id_table: hub_id_table, + .name = "hub", + .probe = hub_probe, + .ioctl = hub_ioctl, + .disconnect = hub_disconnect, + .id_table = hub_id_table, }; /* diff -Nru a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c --- a/drivers/usb/core/inode.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/core/inode.c Sat Jul 20 12:12:34 2002 @@ -284,23 +284,23 @@ } static struct file_operations default_file_operations = { - read: default_read_file, - write: default_write_file, - open: default_open, - llseek: default_file_lseek, + .read = default_read_file, + .write = default_write_file, + .open = default_open, + .llseek = default_file_lseek, }; static struct inode_operations usbfs_dir_inode_operations = { - create: usbfs_create, - lookup: simple_lookup, - unlink: usbfs_unlink, - mkdir: usbfs_mkdir, - rmdir: usbfs_rmdir, + .create = usbfs_create, + .lookup = simple_lookup, + .unlink = usbfs_unlink, + .mkdir = 
usbfs_mkdir, + .rmdir = usbfs_rmdir, }; static struct super_operations usbfs_ops = { - statfs: simple_statfs, - drop_inode: generic_delete_inode, + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, }; static int usbfs_fill_super(struct super_block *sb, void *data, int silent) @@ -468,17 +468,17 @@ } static struct file_system_type usbdevice_fs_type = { - owner: THIS_MODULE, - name: "usbdevfs", - get_sb: usb_get_sb, - kill_sb: kill_anon_super, + .owner = THIS_MODULE, + .name = "usbdevfs", + .get_sb = usb_get_sb, + .kill_sb = kill_anon_super, }; static struct file_system_type usb_fs_type = { - owner: THIS_MODULE, - name: "usbfs", - get_sb: usb_get_sb, - kill_sb: kill_anon_super, + .owner = THIS_MODULE, + .name = "usbfs", + .get_sb = usb_get_sb, + .kill_sb = kill_anon_super, }; /* --------------------------------------------------------------------- */ diff -Nru a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c --- a/drivers/usb/core/usb.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/core/usb.c Sat Jul 20 12:12:35 2002 @@ -32,6 +32,7 @@ #include #include #include +#include #ifdef CONFIG_USB_DEBUG #define DEBUG @@ -117,6 +118,108 @@ up (&usb_bus_list_lock); } +/** + * usb_unbind_driver - disconnects a driver from a device + * @device: usb device to be disconnected + * @intf: interface of the device to be disconnected + * Context: BKL held + * + * Handles module usage count correctly + */ + +void usb_unbind_driver(struct usb_device *device, struct usb_interface *intf) +{ + struct usb_driver *driver; + void *priv; + int m; + + + driver = intf->driver; + priv = intf->private_data; + + if (!driver) + return; + + /* as soon as we increase the module use count we drop the BKL + before that we must not sleep */ + if (driver->owner) { + m = try_inc_mod_count(driver->owner); + if (m == 0) { + err("Dieing driver still bound to device.\n"); + return; + } + unlock_kernel(); + } + down(&driver->serialize); /* if we sleep here on an umanaged driver + the holder of the lock 
guards against + module unload */ + + driver->disconnect(device, priv); + + up(&driver->serialize); + if (driver->owner) { + lock_kernel(); + __MOD_DEC_USE_COUNT(driver->owner); + } +} + +/** + * usb_bind_driver - connect a driver to a device's interface + * @driver: device driver to be bound to a devices interface + * @dev: device to be bound + * @ifnum: index number of the interface to be used + * + * Does a save binding of a driver to a device's interface + * Returns a pointer to the drivers private description of the binding + */ + +void *usb_bind_driver(struct usb_driver *driver, struct usb_device *dev, unsigned int ifnum) +{ + int i,m; + void *private = NULL; + const struct usb_device_id *id; + struct usb_interface *interface; + + if (driver->owner) { + m = try_inc_mod_count(driver->owner); + if (m == 0) + return NULL; /* this horse is dead - don't ride*/ + unlock_kernel(); + } + + interface = &dev->actconfig->interface[ifnum]; + + id = driver->id_table; + /* new style driver? */ + if (id) { + for (i = 0; i < interface->num_altsetting; i++) { + interface->act_altsetting = i; + id = usb_match_id(dev, interface, id); + if (id) { + down(&driver->serialize); + private = driver->probe(dev,ifnum,id); + up(&driver->serialize); + if (private != NULL) + break; + } + } + + /* if driver not bound, leave defaults unchanged */ + if (private == NULL) + interface->act_altsetting = 0; + } else { /* "old style" driver */ + down(&driver->serialize); + private = driver->probe(dev, ifnum, NULL); + up(&driver->serialize); + } + if (driver->owner) { + lock_kernel(); + __MOD_DEC_USE_COUNT(driver->owner); + } + + return private; +} + /* * This function is part of a depth-first search down the device tree, * removing any instances of a device driver. 
@@ -136,18 +239,12 @@ if (!dev->actconfig) return; - + for (i = 0; i < dev->actconfig->bNumInterfaces; i++) { struct usb_interface *interface = &dev->actconfig->interface[i]; - + if (interface->driver == driver) { - if (driver->owner) - __MOD_INC_USE_COUNT(driver->owner); - down(&driver->serialize); - driver->disconnect(dev, interface->private_data); - up(&driver->serialize); - if (driver->owner) - __MOD_DEC_USE_COUNT(driver->owner); + usb_unbind_driver(dev, interface); /* if driver->disconnect didn't release the interface */ if (interface->driver) usb_driver_release_interface(driver, interface); @@ -163,7 +260,7 @@ /** * usb_deregister - unregister a USB driver * @driver: USB operations of the driver to unregister - * Context: !in_interrupt () + * Context: !in_interrupt (), must be called with BKL held * * Unlinks the specified driver from the internal USB driver list. * @@ -528,9 +625,7 @@ struct list_head *tmp; struct usb_interface *interface; void *private; - const struct usb_device_id *id; struct usb_driver *driver; - int i; if ((!dev) || (ifnum >= dev->actconfig->bNumInterfaces)) { err("bad find_interface_driver params"); @@ -545,37 +640,12 @@ goto out_err; private = NULL; + lock_kernel(); for (tmp = usb_driver_list.next; tmp != &usb_driver_list;) { driver = list_entry(tmp, struct usb_driver, driver_list); tmp = tmp->next; - if (driver->owner) - __MOD_INC_USE_COUNT(driver->owner); - id = driver->id_table; - /* new style driver? 
*/ - if (id) { - for (i = 0; i < interface->num_altsetting; i++) { - interface->act_altsetting = i; - id = usb_match_id(dev, interface, id); - if (id) { - down(&driver->serialize); - private = driver->probe(dev,ifnum,id); - up(&driver->serialize); - if (private != NULL) - break; - } - } - - /* if driver not bound, leave defaults unchanged */ - if (private == NULL) - interface->act_altsetting = 0; - } else { /* "old style" driver */ - down(&driver->serialize); - private = driver->probe(dev, ifnum, NULL); - up(&driver->serialize); - } - if (driver->owner) - __MOD_DEC_USE_COUNT(driver->owner); + private = usb_bind_driver(driver, dev, ifnum); /* probe() may have changed the config on us */ interface = dev->actconfig->interface + ifnum; @@ -583,9 +653,11 @@ if (private) { usb_driver_claim_interface(driver, interface, private); up(&dev->serialize); + unlock_kernel(); return 0; } } + unlock_kernel(); out_err: up(&dev->serialize); @@ -764,9 +836,9 @@ return sprintf (buf, "%u\n", udev->actconfig->bConfigurationValue); } static struct driver_file_entry usb_config_entry = { - name: "configuration", - mode: S_IRUGO, - show: show_config, + .name = "configuration", + .mode = S_IRUGO, + .show = show_config, }; /* interfaces have one current setting; alternates @@ -783,9 +855,9 @@ return sprintf (buf, "%u\n", interface->altsetting->bAlternateSetting); } static struct driver_file_entry usb_altsetting_entry = { - name: "altsetting", - mode: S_IRUGO, - show: show_altsetting, + .name = "altsetting", + .mode = S_IRUGO, + .show = show_altsetting, }; /* product driverfs file */ @@ -804,9 +876,9 @@ return len+1; } static struct driver_file_entry usb_product_entry = { - name: "product", - mode: S_IRUGO, - show: show_product, + .name = "product", + .mode = S_IRUGO, + .show = show_product, }; /* manufacturer driverfs file */ @@ -826,9 +898,9 @@ return len+1; } static struct driver_file_entry usb_manufacturer_entry = { - name: "manufacturer", - mode: S_IRUGO, - show: show_manufacturer, + 
.name = "manufacturer", + .mode = S_IRUGO, + .show = show_manufacturer, }; /* serial number driverfs file */ @@ -848,9 +920,9 @@ return len+1; } static struct driver_file_entry usb_serial_entry = { - name: "serial", - mode: S_IRUGO, - show: show_serial, + .name = "serial", + .mode = S_IRUGO, + .show = show_serial, }; /* @@ -1121,27 +1193,22 @@ info("USB disconnect on device %d", dev->devnum); + lock_kernel(); if (dev->actconfig) { for (i = 0; i < dev->actconfig->bNumInterfaces; i++) { struct usb_interface *interface = &dev->actconfig->interface[i]; struct usb_driver *driver = interface->driver; if (driver) { - if (driver->owner) - __MOD_INC_USE_COUNT(driver->owner); - down(&driver->serialize); - driver->disconnect(dev, interface->private_data); - up(&driver->serialize); + usb_unbind_driver(dev, interface); /* if driver->disconnect didn't release the interface */ if (interface->driver) usb_driver_release_interface(driver, interface); - /* we don't need the driver any longer */ - if (driver->owner) - __MOD_DEC_USE_COUNT(driver->owner); } /* remove our device node for this interface */ put_device(&interface->dev); } } + unlock_kernel(); /* Free up all the children.. 
*/ for (i = 0; i < USB_MAXCHILDREN; i++) { @@ -1416,7 +1483,7 @@ #endif struct bus_type usb_bus_type = { - name: "usb", + .name = "usb", }; /* @@ -1475,6 +1542,8 @@ EXPORT_SYMBOL(usb_reset_device); EXPORT_SYMBOL(usb_connect); EXPORT_SYMBOL(usb_disconnect); +EXPORT_SYMBOL(usb_bind_driver); +EXPORT_SYMBOL(usb_unbind_driver); EXPORT_SYMBOL(__usb_get_extra_descriptor); diff -Nru a/drivers/usb/host/Config.in b/drivers/usb/host/Config.in --- a/drivers/usb/host/Config.in Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/Config.in Sat Jul 20 12:12:35 2002 @@ -6,6 +6,5 @@ dep_tristate ' OHCI HCD support' CONFIG_USB_OHCI_HCD $CONFIG_USB dep_tristate ' UHCI HCD (most Intel and VIA) support' CONFIG_USB_UHCI_HCD_ALT $CONFIG_USB if [ "$CONFIG_ARM" = "y" ]; then - dep_tristate ' SA1111 OHCI-compatible host interface support' CONFIG_USB_OHCI_SA1111 $CONFIG_USB dep_tristate ' SL811HS support' CONFIG_USB_SL811HS $CONFIG_USB fi diff -Nru a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile --- a/drivers/usb/host/Makefile Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/Makefile Sat Jul 20 12:12:35 2002 @@ -9,8 +9,6 @@ obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o obj-$(CONFIG_USB_UHCI_HCD_ALT) += uhci-hcd.o -obj-$(CONFIG_USB_OHCI) += usb-ohci.o usb-ohci-pci.o -obj-$(CONFIG_USB_OHCI_SA1111) += usb-ohci.o usb-ohci-sa1111.o obj-$(CONFIG_USB_SL811HS) += hc_sl811.o include $(TOPDIR)/Rules.make diff -Nru a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c --- a/drivers/usb/host/ehci-hcd.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/ehci-hcd.c Sat Jul 20 12:12:35 2002 @@ -702,47 +702,47 @@ static const char hcd_name [] = "ehci-hcd"; static const struct hc_driver ehci_driver = { - description: hcd_name, + .description = hcd_name, /* * generic hardware linkage */ - irq: ehci_irq, - flags: HCD_MEMORY | HCD_USB2, + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, /* * basic lifecycle operations */ - start: ehci_start, + .start = ehci_start, #ifdef CONFIG_PM - suspend: ehci_suspend, - 
resume: ehci_resume, + .suspend = ehci_suspend, + .resume = ehci_resume, #endif - stop: ehci_stop, + .stop = ehci_stop, /* * memory lifecycle (except per-request) */ - hcd_alloc: ehci_hcd_alloc, - hcd_free: ehci_hcd_free, + .hcd_alloc = ehci_hcd_alloc, + .hcd_free = ehci_hcd_free, /* * managing i/o requests and associated device resources */ - urb_enqueue: ehci_urb_enqueue, - urb_dequeue: ehci_urb_dequeue, - free_config: ehci_free_config, + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .free_config = ehci_free_config, /* * scheduling support */ - get_frame_number: ehci_get_frame, + .get_frame_number = ehci_get_frame, /* * root hub support */ - hub_status_data: ehci_hub_status_data, - hub_control: ehci_hub_control, + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, }; /*-------------------------------------------------------------------------*/ @@ -754,15 +754,15 @@ /* handle any USB 2.0 EHCI controller */ - class: ((PCI_CLASS_SERIAL_USB << 8) | 0x20), - class_mask: ~0, - driver_data: (unsigned long) &ehci_driver, + .class = ((PCI_CLASS_SERIAL_USB << 8) | 0x20), + .class_mask = ~0, + .driver_data = (unsigned long) &ehci_driver, /* no matter who makes it */ - vendor: PCI_ANY_ID, - device: PCI_ANY_ID, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; @@ -770,15 +770,15 @@ /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver ehci_pci_driver = { - name: (char *) hcd_name, - id_table: pci_ids, + .name = (char *) hcd_name, + .id_table = pci_ids, - probe: usb_hcd_pci_probe, - remove: usb_hcd_pci_remove, + .probe = usb_hcd_pci_probe, + .remove = usb_hcd_pci_remove, #ifdef CONFIG_PM - suspend: usb_hcd_pci_suspend, - resume: usb_hcd_pci_resume, + .suspend = usb_hcd_pci_suspend, + .resume = usb_hcd_pci_resume, #endif }; diff -Nru a/drivers/usb/host/hc_simple.c 
b/drivers/usb/host/hc_simple.c --- a/drivers/usb/host/hc_simple.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/hc_simple.c Sat Jul 20 12:12:35 2002 @@ -343,11 +343,11 @@ **************************************************************************/ static struct usb_operations hci_device_operations = { - allocate: hci_alloc_dev, - deallocate: hci_free_dev, - get_frame_number: hci_get_current_frame_number, - submit_urb: hci_submit_urb, - unlink_urb: hci_unlink_urb, + .allocate = hci_alloc_dev, + .deallocate = hci_free_dev, + .get_frame_number = hci_get_current_frame_number, + .submit_urb = hci_submit_urb, + .unlink_urb = hci_unlink_urb, }; /*************************************************************************** diff -Nru a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c --- a/drivers/usb/host/ohci-hcd.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/host/ohci-hcd.c Sat Jul 20 12:12:34 2002 @@ -167,7 +167,7 @@ else if ((urb->transfer_flags & USB_ZERO_PACKET) != 0 && (urb->transfer_buffer_length % usb_maxpacket (urb->dev, pipe, - usb_pipeout (pipe))) != 0) + usb_pipeout (pipe))) == 0) size++; break; case PIPE_ISOCHRONOUS: /* number of packets from URB */ diff -Nru a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c --- a/drivers/usb/host/ohci-pci.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/ohci-pci.c Sat Jul 20 12:12:35 2002 @@ -284,47 +284,47 @@ /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_pci_hc_driver = { - description: hcd_name, + .description = hcd_name, /* * generic hardware linkage */ - irq: ohci_irq, - flags: HCD_MEMORY | HCD_USB11, + .irq = ohci_irq, + .flags = HCD_MEMORY | HCD_USB11, /* * basic lifecycle operations */ - start: ohci_pci_start, + .start = ohci_pci_start, #ifdef CONFIG_PM - suspend: ohci_pci_suspend, - resume: ohci_pci_resume, + .suspend = ohci_pci_suspend, + .resume = ohci_pci_resume, #endif - stop: ohci_stop, + .stop = ohci_stop, /* * memory lifecycle 
(except per-request) */ - hcd_alloc: ohci_hcd_alloc, - hcd_free: ohci_hcd_free, + .hcd_alloc = ohci_hcd_alloc, + .hcd_free = ohci_hcd_free, /* * managing i/o requests and associated device resources */ - urb_enqueue: ohci_urb_enqueue, - urb_dequeue: ohci_urb_dequeue, - free_config: ohci_free_config, + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .free_config = ohci_free_config, /* * scheduling support */ - get_frame_number: ohci_get_frame, + .get_frame_number = ohci_get_frame, /* * root hub support */ - hub_status_data: ohci_hub_status_data, - hub_control: ohci_hub_control, + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_hub_control, }; /*-------------------------------------------------------------------------*/ @@ -333,15 +333,15 @@ static const struct pci_device_id __devinitdata pci_ids [] = { { /* handle any USB OHCI controller */ - class: (PCI_CLASS_SERIAL_USB << 8) | 0x10, - class_mask: ~0, - driver_data: (unsigned long) &ohci_pci_hc_driver, + .class = (PCI_CLASS_SERIAL_USB << 8) | 0x10, + .class_mask = ~0, + .driver_data = (unsigned long) &ohci_pci_hc_driver, /* no matter who makes it */ - vendor: PCI_ANY_ID, - device: PCI_ANY_ID, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; @@ -349,15 +349,15 @@ /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver ohci_pci_driver = { - name: (char *) hcd_name, - id_table: pci_ids, + .name = (char *) hcd_name, + .id_table = pci_ids, - probe: usb_hcd_pci_probe, - remove: usb_hcd_pci_remove, + .probe = usb_hcd_pci_probe, + .remove = usb_hcd_pci_remove, #ifdef CONFIG_PM - suspend: usb_hcd_pci_suspend, - resume: usb_hcd_pci_resume, + .suspend = usb_hcd_pci_suspend, + .resume = usb_hcd_pci_resume, #endif }; diff -Nru a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c --- a/drivers/usb/host/ohci-sa1111.c 
Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/ohci-sa1111.c Sat Jul 20 12:12:35 2002 @@ -288,47 +288,47 @@ /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_sa1111_hc_driver = { - description: hcd_name, + .description = hcd_name, /* * generic hardware linkage */ - irq: ohci_irq, - flags: HCD_USB11, + .irq = ohci_irq, + .flags = HCD_USB11, /* * basic lifecycle operations */ - start: ohci_sa1111_start, + .start = ohci_sa1111_start, #ifdef CONFIG_PM /* suspend: ohci_sa1111_suspend, -- tbd */ /* resume: ohci_sa1111_resume, -- tbd */ #endif - stop: ohci_stop, + .stop = ohci_stop, /* * memory lifecycle (except per-request) */ - hcd_alloc: ohci_hcd_alloc, - hcd_free: ohci_hcd_free, + .hcd_alloc = ohci_hcd_alloc, + .hcd_free = ohci_hcd_free, /* * managing i/o requests and associated device resources */ - urb_enqueue: ohci_urb_enqueue, - urb_dequeue: ohci_urb_dequeue, - free_config: ohci_free_config, + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .free_config = ohci_free_config, /* * scheduling support */ - get_frame_number: ohci_get_frame, + .get_frame_number = ohci_get_frame, /* * root hub support */ - hub_status_data: ohci_hub_status_data, - hub_control: ohci_hub_control, + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_hub_control, }; /*-------------------------------------------------------------------------*/ diff -Nru a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c --- a/drivers/usb/host/uhci-debug.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/host/uhci-debug.c Sat Jul 20 12:12:34 2002 @@ -571,11 +571,11 @@ } static struct file_operations uhci_proc_operations = { - open: uhci_proc_open, - llseek: uhci_proc_lseek, - read: uhci_proc_read, + .open = uhci_proc_open, + .llseek = uhci_proc_lseek, + .read = uhci_proc_read, // write: uhci_proc_write, - release: uhci_proc_release, + .release = uhci_proc_release, }; #endif diff -Nru 
a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c --- a/drivers/usb/host/uhci-hcd.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/host/uhci-hcd.c Sat Jul 20 12:12:35 2002 @@ -2476,45 +2476,45 @@ static const char hcd_name[] = "uhci-hcd"; static const struct hc_driver uhci_driver = { - description: hcd_name, + .description = hcd_name, /* Generic hardware linkage */ - irq: uhci_irq, - flags: HCD_USB11, + .irq = uhci_irq, + .flags = HCD_USB11, /* Basic lifecycle operations */ - start: uhci_start, + .start = uhci_start, #ifdef CONFIG_PM - suspend: uhci_suspend, - resume: uhci_resume, + .suspend = uhci_suspend, + .resume = uhci_resume, #endif - stop: uhci_stop, + .stop = uhci_stop, - hcd_alloc: uhci_hcd_alloc, - hcd_free: uhci_hcd_free, + .hcd_alloc = uhci_hcd_alloc, + .hcd_free = uhci_hcd_free, - urb_enqueue: uhci_urb_enqueue, - urb_dequeue: uhci_urb_dequeue, - free_config: NULL, + .urb_enqueue = uhci_urb_enqueue, + .urb_dequeue = uhci_urb_dequeue, + .free_config = NULL, - get_frame_number: uhci_hcd_get_frame_number, + .get_frame_number = uhci_hcd_get_frame_number, - hub_status_data: uhci_hub_status_data, - hub_control: uhci_hub_control, + .hub_status_data = uhci_hub_status_data, + .hub_control = uhci_hub_control, }; static const struct pci_device_id __devinitdata uhci_pci_ids[] = { { /* handle any USB UHCI controller */ - class: ((PCI_CLASS_SERIAL_USB << 8) | 0x00), - class_mask: ~0, - driver_data: (unsigned long) &uhci_driver, + .class = ((PCI_CLASS_SERIAL_USB << 8) | 0x00), + .class_mask = ~0, + .driver_data = (unsigned long) &uhci_driver, /* no matter who makes it */ - vendor: PCI_ANY_ID, - device: PCI_ANY_ID, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; @@ -2522,15 +2522,15 @@ MODULE_DEVICE_TABLE(pci, uhci_pci_ids); static struct pci_driver uhci_pci_driver = { - name: (char *)hcd_name, - id_table: uhci_pci_ids, + .name = 
(char *)hcd_name, + .id_table = uhci_pci_ids, - probe: usb_hcd_pci_probe, - remove: usb_hcd_pci_remove, + .probe = usb_hcd_pci_probe, + .remove = usb_hcd_pci_remove, #ifdef CONFIG_PM - suspend: usb_hcd_pci_suspend, - resume: usb_hcd_pci_resume, + .suspend = usb_hcd_pci_suspend, + .resume = usb_hcd_pci_resume, #endif /* PM */ }; diff -Nru a/drivers/usb/host/usb-ohci-pci.c b/drivers/usb/host/usb-ohci-pci.c --- a/drivers/usb/host/usb-ohci-pci.c Sat Jul 20 12:12:35 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,456 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include /* for in_interrupt() */ -#undef DEBUG -#include - -#include "../core/hcd.h" -#include "usb-ohci.h" - -#ifdef CONFIG_PMAC_PBOOK -#include -#include -#include -#ifndef CONFIG_PM -#define CONFIG_PM -#endif -#endif - -int __devinit -hc_add_ohci(struct pci_dev *dev, int irq, void *membase, unsigned long flags, - ohci_t **ohci, const char *name, const char *slot_name); -extern void hc_remove_ohci(ohci_t *ohci); -extern int hc_start (ohci_t * ohci, struct device *parent_dev); -extern int hc_reset (ohci_t * ohci); - -/*-------------------------------------------------------------------------*/ - -/* Increment the module usage count, start the control thread and - * return success. 
*/ - -static struct pci_driver ohci_pci_driver; - -static int __devinit -hc_found_ohci (struct pci_dev *dev, int irq, - void *mem_base, const struct pci_device_id *id) -{ - u8 latency, limit; - ohci_t * ohci; - int ret; - - printk(KERN_INFO __FILE__ ": usb-%s, %s\n", dev->slot_name, dev->name); - - /* bad pci latencies can contribute to overruns */ - pci_read_config_byte (dev, PCI_LATENCY_TIMER, &latency); - if (latency) { - pci_read_config_byte (dev, PCI_MAX_LAT, &limit); - if (limit && limit < latency) { - dbg ("PCI latency reduced to max %d", limit); - pci_write_config_byte (dev, PCI_LATENCY_TIMER, limit); - latency = limit; - } - } - - ret = hc_add_ohci(dev, irq, mem_base, id->driver_data, - &ohci, ohci_pci_driver.name, dev->slot_name); - - if (ret == 0) { - ohci->pci_latency = latency; - - if (hc_start (ohci, &ohci->ohci_dev->dev) < 0) { - err ("can't start usb-%s", ohci->slot_name); - hc_remove_ohci(ohci); - return -EBUSY; - } - -#ifdef DEBUG - ohci_dump (ohci, 1); -#endif - } - - return ret; -} - -/*-------------------------------------------------------------------------*/ - -#ifdef CONFIG_PM - -/* controller died; cleanup debris, then restart */ -/* must not be called from interrupt context */ - -static void hc_restart (ohci_t *ohci) -{ - int temp; - int i; - - if (ohci->pci_latency) - pci_write_config_byte (ohci->ohci_dev, PCI_LATENCY_TIMER, ohci->pci_latency); - - ohci->disabled = 1; - ohci->sleeping = 0; - if (ohci->bus->root_hub) - usb_disconnect (&ohci->bus->root_hub); - - /* empty the interrupt branches */ - for (i = 0; i < NUM_INTS; i++) ohci->ohci_int_load[i] = 0; - for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table[i] = 0; - - /* no EDs to remove */ - ohci->ed_rm_list [0] = NULL; - ohci->ed_rm_list [1] = NULL; - - /* empty control and bulk lists */ - ohci->ed_isotail = NULL; - ohci->ed_controltail = NULL; - ohci->ed_bulktail = NULL; - - if ((temp = hc_reset (ohci)) < 0 || - (temp = hc_start (ohci, &ohci->ohci_dev->dev)) < 0) { - err ("can't 
restart usb-%s, %d", ohci->ohci_dev->slot_name, temp); - } else - dbg ("restart usb-%s completed", ohci->ohci_dev->slot_name); -} - -#endif /* CONFIG_PM */ - -/*-------------------------------------------------------------------------*/ - -/* configured so that an OHCI device is always provided */ -/* always called with process context; sleeping is OK */ - -static int __devinit -ohci_pci_probe (struct pci_dev *dev, const struct pci_device_id *id) -{ - unsigned long mem_resource, mem_len; - void *mem_base; - int status; - - if (pci_enable_device(dev) < 0) - return -ENODEV; - - if (!dev->irq) { - err("found OHCI device with no IRQ assigned. check BIOS settings!"); - pci_disable_device (dev); - return -ENODEV; - } - - /* we read its hardware registers as memory */ - mem_resource = pci_resource_start(dev, 0); - mem_len = pci_resource_len(dev, 0); - if (!request_mem_region (mem_resource, mem_len, ohci_pci_driver.name)) { - dbg ("controller already in use"); - pci_disable_device (dev); - return -EBUSY; - } - - mem_base = ioremap_nocache (mem_resource, mem_len); - if (!mem_base) { - err("Error mapping OHCI memory"); - release_mem_region(mem_resource, mem_len); - pci_disable_device (dev); - return -EFAULT; - } - - /* controller writes into our memory */ - pci_set_master (dev); - - status = hc_found_ohci (dev, dev->irq, mem_base, id); - if (status < 0) { - iounmap (mem_base); - release_mem_region(mem_resource, mem_len); - pci_disable_device (dev); - } - return status; -} - -/*-------------------------------------------------------------------------*/ - -/* may be called from interrupt context [interface spec] */ -/* may be called without controller present */ -/* may be called with controller, bus, and devices active */ - -static void __devexit -ohci_pci_remove (struct pci_dev *dev) -{ - ohci_t *ohci = (ohci_t *) pci_get_drvdata(dev); - void *membase = ohci->regs; - - dbg ("remove %s controller usb-%s%s%s", - hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS), - 
dev->slot_name, - ohci->disabled ? " (disabled)" : "", - in_interrupt () ? " in interrupt" : "" - ); - - hc_remove_ohci(ohci); - - /* unmap the IO address space */ - iounmap (membase); - - release_mem_region (pci_resource_start (dev, 0), pci_resource_len (dev, 0)); -} - - -#ifdef CONFIG_PM - -/*-------------------------------------------------------------------------*/ - -static int -ohci_pci_suspend (struct pci_dev *dev, u32 state) -{ - ohci_t *ohci = (ohci_t *) pci_get_drvdata(dev); - unsigned long flags; - u16 cmd; - - if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER) { - dbg ("can't suspend usb-%s (state is %s)", dev->slot_name, - hcfs2string (ohci->hc_control & OHCI_CTRL_HCFS)); - return -EIO; - } - - /* act as if usb suspend can always be used */ - info ("USB suspend: usb-%s", dev->slot_name); - ohci->sleeping = 1; - - /* First stop processing */ - spin_lock_irqsave (&usb_ed_lock, flags); - ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_IE); - writel (ohci->hc_control, &ohci->regs->control); - writel (OHCI_INTR_SF, &ohci->regs->intrstatus); - (void) readl (&ohci->regs->intrstatus); - spin_unlock_irqrestore (&usb_ed_lock, flags); - - /* Wait a frame or two */ - mdelay(1); - if (!readl (&ohci->regs->intrstatus) & OHCI_INTR_SF) - mdelay (1); - -#ifdef CONFIG_PMAC_PBOOK - if (_machine == _MACH_Pmac) - disable_irq (ohci->irq); - /* else, 2.4 assumes shared irqs -- don't disable */ -#endif - /* Enable remote wakeup */ - writel (readl(&ohci->regs->intrenable) | OHCI_INTR_RD, &ohci->regs->intrenable); - - /* Suspend chip and let things settle down a bit */ - ohci->hc_control = OHCI_USB_SUSPEND; - writel (ohci->hc_control, &ohci->regs->control); - (void) readl (&ohci->regs->control); - mdelay (500); /* No schedule here ! 
*/ - switch (readl (&ohci->regs->control) & OHCI_CTRL_HCFS) { - case OHCI_USB_RESET: - dbg("Bus in reset phase ???"); - break; - case OHCI_USB_RESUME: - dbg("Bus in resume phase ???"); - break; - case OHCI_USB_OPER: - dbg("Bus in operational phase ???"); - break; - case OHCI_USB_SUSPEND: - dbg("Bus suspended"); - break; - } - /* In some rare situations, Apple's OHCI have happily trashed - * memory during sleep. We disable it's bus master bit during - * suspend - */ - pci_read_config_word (dev, PCI_COMMAND, &cmd); - cmd &= ~PCI_COMMAND_MASTER; - pci_write_config_word (dev, PCI_COMMAND, cmd); -#ifdef CONFIG_PMAC_PBOOK - { - struct device_node *of_node; - - /* Disable USB PAD & cell clock */ - of_node = pci_device_to_OF_node (ohci->ohci_dev); - if (of_node && _machine == _MACH_Pmac) - pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0); - } -#endif - return 0; -} - -/*-------------------------------------------------------------------------*/ - -static int -ohci_pci_resume (struct pci_dev *dev) -{ - ohci_t *ohci = (ohci_t *) pci_get_drvdata(dev); - int temp; - unsigned long flags; - - /* guard against multiple resumes */ - atomic_inc (&ohci->resume_count); - if (atomic_read (&ohci->resume_count) != 1) { - err ("concurrent PCI resumes for usb-%s", dev->slot_name); - atomic_dec (&ohci->resume_count); - return 0; - } - -#ifdef CONFIG_PMAC_PBOOK - { - struct device_node *of_node; - - /* Re-enable USB PAD & cell clock */ - of_node = pci_device_to_OF_node (ohci->ohci_dev); - if (of_node && _machine == _MACH_Pmac) - pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 1); - } -#endif - - /* did we suspend, or were we powered off? 
*/ - ohci->hc_control = readl (&ohci->regs->control); - temp = ohci->hc_control & OHCI_CTRL_HCFS; - -#ifdef DEBUG - /* the registers may look crazy here */ - ohci_dump_status (ohci); -#endif - - /* Re-enable bus mastering */ - pci_set_master(ohci->ohci_dev); - - switch (temp) { - - case OHCI_USB_RESET: // lost power - info ("USB restart: usb-%s", dev->slot_name); - hc_restart (ohci); - break; - - case OHCI_USB_SUSPEND: // host wakeup - case OHCI_USB_RESUME: // remote wakeup - info ("USB continue: usb-%s from %s wakeup", dev->slot_name, - (temp == OHCI_USB_SUSPEND) - ? "host" : "remote"); - ohci->hc_control = OHCI_USB_RESUME; - writel (ohci->hc_control, &ohci->regs->control); - (void) readl (&ohci->regs->control); - mdelay (20); /* no schedule here ! */ - /* Some controllers (lucent) need a longer delay here */ - mdelay (15); - temp = readl (&ohci->regs->control); - temp = ohci->hc_control & OHCI_CTRL_HCFS; - if (temp != OHCI_USB_RESUME) { - err ("controller usb-%s won't resume", dev->slot_name); - ohci->disabled = 1; - return -EIO; - } - - /* Some chips likes being resumed first */ - writel (OHCI_USB_OPER, &ohci->regs->control); - (void) readl (&ohci->regs->control); - mdelay (3); - - /* Then re-enable operations */ - spin_lock_irqsave (&usb_ed_lock, flags); - ohci->disabled = 0; - ohci->sleeping = 0; - ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER; - if (!ohci->ed_rm_list[0] && !ohci->ed_rm_list[1]) { - if (ohci->ed_controltail) - ohci->hc_control |= OHCI_CTRL_CLE; - if (ohci->ed_bulktail) - ohci->hc_control |= OHCI_CTRL_BLE; - } - writel (ohci->hc_control, &ohci->regs->control); - writel (OHCI_INTR_SF, &ohci->regs->intrstatus); - writel (OHCI_INTR_SF, &ohci->regs->intrenable); - /* Check for a pending done list */ - writel (OHCI_INTR_WDH, &ohci->regs->intrdisable); - (void) readl (&ohci->regs->intrdisable); - spin_unlock_irqrestore (&usb_ed_lock, flags); -#ifdef CONFIG_PMAC_PBOOK - if (_machine == _MACH_Pmac) - enable_irq (ohci->irq); -#endif - if 
(ohci->hcca->done_head) - dl_done_list (ohci, dl_reverse_done_list (ohci)); - writel (OHCI_INTR_WDH, &ohci->regs->intrenable); - writel (OHCI_BLF, &ohci->regs->cmdstatus); /* start bulk list */ - writel (OHCI_CLF, &ohci->regs->cmdstatus); /* start Control list */ - break; - - default: - warn ("odd PCI resume for usb-%s", dev->slot_name); - } - - /* controller is operational, extra resumes are harmless */ - atomic_dec (&ohci->resume_count); - - return 0; -} - -#endif /* CONFIG_PM */ - - -/*-------------------------------------------------------------------------*/ - -static const struct pci_device_id __devinitdata ohci_pci_ids [] = { { - - /* - * AMD-756 [Viper] USB has a serious erratum when used with - * lowspeed devices like mice. - */ - vendor: 0x1022, - device: 0x740c, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, - - driver_data: OHCI_QUIRK_AMD756, - -} , { - - /* handle any USB OHCI controller */ - class: ((PCI_CLASS_SERIAL_USB << 8) | 0x10), - class_mask: ~0, - - /* no matter who makes it */ - vendor: PCI_ANY_ID, - device: PCI_ANY_ID, - subvendor: PCI_ANY_ID, - subdevice: PCI_ANY_ID, - - }, { /* end: all zeroes */ } -}; - -MODULE_DEVICE_TABLE (pci, ohci_pci_ids); - -static struct pci_driver ohci_pci_driver = { - name: "usb-ohci", - id_table: &ohci_pci_ids [0], - - probe: ohci_pci_probe, - remove: __devexit_p(ohci_pci_remove), - -#ifdef CONFIG_PM - suspend: ohci_pci_suspend, - resume: ohci_pci_resume, -#endif /* PM */ -}; - - -/*-------------------------------------------------------------------------*/ - -static int __init ohci_hcd_init (void) -{ - return pci_module_init (&ohci_pci_driver); -} - -/*-------------------------------------------------------------------------*/ - -static void __exit ohci_hcd_cleanup (void) -{ - pci_unregister_driver (&ohci_pci_driver); -} - -module_init (ohci_hcd_init); -module_exit (ohci_hcd_cleanup); - -MODULE_LICENSE("GPL"); diff -Nru a/drivers/usb/host/usb-ohci-sa1111.c b/drivers/usb/host/usb-ohci-sa1111.c --- 
a/drivers/usb/host/usb-ohci-sa1111.c Sat Jul 20 12:12:34 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,153 +0,0 @@ -/* - * linux/drivers/usb/usb-ohci-sa1111.c - * - * The outline of this code was taken from Brad Parkers - * original OHCI driver modifications, and reworked into a cleaner form - * by Russell King . - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "usb-ohci.h" - -int __devinit -hc_add_ohci(struct pci_dev *dev, int irq, void *membase, unsigned long flags, - ohci_t **ohci, const char *name, const char *slot_name); -extern void hc_remove_ohci(ohci_t *ohci); -extern int hc_start (ohci_t * ohci, struct device *parent_dev); -extern int hc_reset (ohci_t * ohci); - - -static ohci_t *sa1111_ohci; - -static void __init sa1111_ohci_configure(void) -{ - unsigned int usb_rst = 0; - - printk(KERN_DEBUG __FILE__ - ": starting SA-1111 OHCI USB Controller\n"); - -#ifdef CONFIG_SA1100_BADGE4 - if (machine_is_badge4()) - /* power the bus */ - badge4_set_5V(BADGE4_5V_USB, 1); -#endif - - if (machine_is_xp860() || - machine_has_neponset() || - machine_is_pfs168() || - machine_is_badge4()) - usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW; - - /* - * Configure the power sense and control lines. Place the USB - * host controller in reset. - */ - USB_RESET = usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET; - - /* - * Now, carefully enable the USB clock, and take - * the USB host controller out of reset. - */ - SKPCR |= SKPCR_UCLKEN; - udelay(11); - USB_RESET = usb_rst; -} - -static void __exit sa1111_ohci_unconfigure(void) -{ - printk(KERN_DEBUG __FILE__ - ": stopping SA-1111 OHCI USB Controller\n"); - - /* - * Put the USB host controller into reset. - */ - USB_RESET |= USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET; - - /* - * Stop the USB clock. 
- */ - SKPCR &= ~SKPCR_UCLKEN; - -#ifdef CONFIG_SA1100_BADGE4 - if (machine_is_badge4()) - badge4_set_5V(BADGE4_5V_USB, 0); -#endif -} - - -static int __init sa1111_ohci_init(void) -{ - int ret; - - if (!sa1111) - return -ENODEV; - - /* - * Request memory resources. - */ - if (!request_mem_region(_USB_OHCI_OP_BASE, _USB_EXTENT, "usb-ohci")) - return -EBUSY; - - sa1111_ohci_configure(); - - /* - * Initialise the generic OHCI driver. - */ - sa1111_ohci = 0; - ret = hc_add_ohci(SA1111_FAKE_PCIDEV, NIRQHCIM, - (void *)&USB_OHCI_OP_BASE, 0, &sa1111_ohci, - "usb-ohci", "sa1111"); - - if (ret || !sa1111_ohci) { - sa1111_ohci = 0; - sa1111_ohci_unconfigure(); - release_mem_region(_USB_OHCI_OP_BASE, _USB_EXTENT); - return -EBUSY; - } - - if (hc_start (sa1111_ohci, &sa1111->dev) < 0) { - err ("can't start usb-%s", sa1111_ohci->slot_name); - hc_remove_ohci (sa1111_ohci); - sa1111_ohci = 0; - sa1111_ohci_unconfigure(); - release_mem_region(_USB_OHCI_OP_BASE, _USB_EXTENT); - return -EBUSY; - } - - return 0; -} - -static void __exit sa1111_ohci_exit(void) -{ - printk(KERN_DEBUG __FUNCTION__ ": cleaning up\n"); - - if (sa1111_ohci) { - hc_remove_ohci(sa1111_ohci); - sa1111_ohci = 0; - } - - sa1111_ohci_unconfigure(); - release_mem_region(_USB_OHCI_OP_BASE, _USB_EXTENT); - - printk(KERN_DEBUG __FUNCTION__ ": exiting\n"); -} - -module_init(sa1111_ohci_init); -module_exit(sa1111_ohci_exit); - -MODULE_LICENSE("GPL"); diff -Nru a/drivers/usb/host/usb-ohci.c b/drivers/usb/host/usb-ohci.c --- a/drivers/usb/host/usb-ohci.c Sat Jul 20 12:12:35 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,2537 +0,0 @@ -/* - * URB OHCI HCD (Host Controller Driver) for USB. - * - * (C) Copyright 1999 Roman Weissgaerber - * (C) Copyright 2000-2002 David Brownell - * - * [ Initialisation is based on Linus' ] - * [ uhci code and gregs ohci fragments ] - * [ (C) Copyright 1999 Linus Torvalds ] - * [ (C) Copyright 1999 Gregory P. 
Smith] - * - * - * History: - * - * 2002/03/08 interrupt unlink fix (Matt Hughes), better cleanup on - * load failure (Matthew Frederickson) - * 2002/01/20 async unlink fixes: return -EINPROGRESS (per spec) and - * make interrupt unlink-in-completion work (db) - * - * 2001/09/19 USB_ZERO_PACKET support (Jean Tourrilhes) - * 2001/07/17 power management and pmac cleanup (Benjamin Herrenschmidt) - * 2001/03/24 td/ed hashing to remove bus_to_virt (Steve Longerbeam); - pci_map_single (db) - * 2001/03/21 td and dev/ed allocation uses new pci_pool API (db) - * 2001/03/07 hcca allocation uses pci_alloc_consistent (Steve Longerbeam) - * - * 2000/09/26 fixed races in removing the private portion of the urb - * 2000/09/07 disable bulk and control lists when unlinking the last - * endpoint descriptor in order to avoid unrecoverable errors on - * the Lucent chips. (rwc@sgi) - * 2000/08/29 use bandwidth claiming hooks (thanks Randy!), fix some - * urb unlink probs, indentation fixes - * 2000/08/11 various oops fixes mostly affecting iso and cleanup from - * device unplugs. - * 2000/06/28 use PCI hotplug framework, for better power management - * and for Cardbus support (David Brownell) - * 2000/earlier: fixes for NEC/Lucent chips; suspend/resume handling - * when the controller loses power; handle UE; cleanup; ... - * - * v5.2 1999/12/07 URB 3rd preview, - * v5.1 1999/11/30 URB 2nd preview, cpia, (usb-scsi) - * v5.0 1999/11/22 URB Technical preview, Paul Mackerras powerbook susp/resume - * i386: HUB, Keyboard, Mouse, Printer - * - * v4.3 1999/10/27 multiple HCs, bulk_request - * v4.2 1999/09/05 ISO API alpha, new dev alloc, neg Error-codes - * v4.1 1999/08/27 Randy Dunlap's - ISO API first impl. 
- * v4.0 1999/08/18 - * v3.0 1999/06/25 - * v2.1 1999/05/09 code clean up - * v2.0 1999/05/04 - * v1.0 1999/04/27 initial release - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for in_interrupt() */ - -#ifdef CONFIG_USB_DEBUG -# define DEBUG -#else -# undef DEBUG -#endif -#include - -#include -#include -#include -#include -#include - -#define OHCI_USE_NPS // force NoPowerSwitching mode -// #define OHCI_VERBOSE_DEBUG /* not always helpful */ - -#include "../core/hcd.h" -#include "usb-ohci.h" - - -/* - * Version Information - */ -#define DRIVER_VERSION "v5.3" -#define DRIVER_AUTHOR "Roman Weissgaerber , David Brownell" -#define DRIVER_DESC "USB OHCI Host Controller Driver" - -#define OHCI_UNLINK_TIMEOUT (HZ / 10) - -static LIST_HEAD (ohci_hcd_list); -spinlock_t usb_ed_lock = SPIN_LOCK_UNLOCKED; - - -/*-------------------------------------------------------------------------*/ - -/* AMD-756 (D2 rev) reports corrupt register contents in some cases. - * The erratum (#4) description is incorrect. AMD's workaround waits - * till some bits (mostly reserved) are clear; ok for all revs. 
- */ -#define read_roothub(hc, register, mask) ({ \ - u32 temp = readl (&hc->regs->roothub.register); \ - if (hc->flags & OHCI_QUIRK_AMD756) \ - while (temp & mask) \ - temp = readl (&hc->regs->roothub.register); \ - temp; }) - -static u32 roothub_a (struct ohci *hc) - { return read_roothub (hc, a, 0xfc0fe000); } -static inline u32 roothub_b (struct ohci *hc) - { return readl (&hc->regs->roothub.b); } -static inline u32 roothub_status (struct ohci *hc) - { return readl (&hc->regs->roothub.status); } -static u32 roothub_portstatus (struct ohci *hc, int i) - { return read_roothub (hc, portstatus [i], 0xffe0fce0); } - - -/*-------------------------------------------------------------------------* - * URB support functions - *-------------------------------------------------------------------------*/ - -/* free HCD-private data associated with this URB */ - -static void urb_free_priv (struct ohci *hc, urb_priv_t * urb_priv) -{ - int i; - int last = urb_priv->length - 1; - int len; - int dir; - struct td *td; - - if (last >= 0) { - - /* ISOC, BULK, INTR data buffer starts at td 0 - * CTRL setup starts at td 0 */ - td = urb_priv->td [0]; - - len = td->urb->transfer_buffer_length, - dir = usb_pipeout (td->urb->pipe) - ? 
PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE; - - /* unmap CTRL URB setup */ - if (usb_pipecontrol (td->urb->pipe)) { - pci_unmap_single (hc->ohci_dev, - td->data_dma, 8, PCI_DMA_TODEVICE); - - /* CTRL data buffer starts at td 1 if len > 0 */ - if (len && last > 0) - td = urb_priv->td [1]; - } - - /* unmap data buffer */ - if (len && td->data_dma) - pci_unmap_single (hc->ohci_dev, td->data_dma, len, dir); - - for (i = 0; i <= last; i++) { - td = urb_priv->td [i]; - if (td) - td_free (hc, td); - } - } - - kfree (urb_priv); -} - -static void urb_rm_priv_locked (struct urb * urb) -{ - urb_priv_t * urb_priv = urb->hcpriv; - - if (urb_priv) { - urb->hcpriv = NULL; - -#ifdef DO_TIMEOUTS - if (urb->timeout) { - list_del (&urb->urb_list); - urb->timeout -= jiffies; - } -#endif - - /* Release int/iso bandwidth */ - if (urb->bandwidth) { - switch (usb_pipetype(urb->pipe)) { - case PIPE_INTERRUPT: - usb_release_bandwidth (urb->dev, urb, 0); - break; - case PIPE_ISOCHRONOUS: - usb_release_bandwidth (urb->dev, urb, 1); - break; - default: - break; - } - } - - urb_free_priv ((struct ohci *)urb->dev->bus->hcpriv, urb_priv); - usb_put_dev (urb->dev); - urb->dev = NULL; - usb_put_urb (urb); - } -} - -static void urb_rm_priv (struct urb * urb) -{ - unsigned long flags; - - spin_lock_irqsave (&usb_ed_lock, flags); - urb_rm_priv_locked (urb); - spin_unlock_irqrestore (&usb_ed_lock, flags); -} - -/*-------------------------------------------------------------------------*/ - -#ifdef DEBUG -static int sohci_get_current_frame_number (struct usb_device * dev); - -/* debug| print the main components of an URB - * small: 0) header + data packets 1) just header */ - -static void urb_print (struct urb * urb, char * str, int small) -{ - unsigned int pipe= urb->pipe; - - if (!urb->dev || !urb->dev->bus) { - dbg("%s URB: no dev", str); - return; - } - -#ifndef OHCI_VERBOSE_DEBUG - if (urb->status != 0) -#endif - dbg("%s URB:[%4x] dev:%2d,ep:%2d-%c,type:%s,flags:%4x,len:%d/%d,stat:%d(%x)", - str, - 
sohci_get_current_frame_number (urb->dev), - usb_pipedevice (pipe), - usb_pipeendpoint (pipe), - usb_pipeout (pipe)? 'O': 'I', - usb_pipetype (pipe) < 2? (usb_pipeint (pipe)? "INTR": "ISOC"): - (usb_pipecontrol (pipe)? "CTRL": "BULK"), - urb->transfer_flags, - urb->actual_length, - urb->transfer_buffer_length, - urb->status, urb->status); -#ifdef OHCI_VERBOSE_DEBUG - if (!small) { - int i, len; - - if (usb_pipecontrol (pipe)) { - printk (KERN_DEBUG __FILE__ ": cmd(8):"); - for (i = 0; i < 8 ; i++) - printk (" %02x", ((__u8 *) urb->setup_packet) [i]); - printk ("\n"); - } - if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { - printk (KERN_DEBUG __FILE__ ": data(%d/%d):", - urb->actual_length, - urb->transfer_buffer_length); - len = usb_pipeout (pipe)? - urb->transfer_buffer_length: urb->actual_length; - for (i = 0; i < 16 && i < len; i++) - printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]); - printk ("%s stat:%d\n", i < len? "...": "", urb->status); - } - } -#endif -} - -/* just for debugging; prints non-empty branches of the int ed tree inclusive iso eds*/ -void ep_print_int_eds (ohci_t * ohci, char * str) { - int i, j; - __u32 * ed_p; - for (i= 0; i < 32; i++) { - j = 5; - ed_p = &(ohci->hcca->int_table [i]); - if (*ed_p == 0) - continue; - printk (KERN_DEBUG __FILE__ ": %s branch int %2d(%2x):", str, i, i); - while (*ed_p != 0 && j--) { - ed_t *ed = dma_to_ed (ohci, le32_to_cpup(ed_p)); - printk (" ed: %4x;", ed->hwINFO); - ed_p = &ed->hwNextED; - } - printk ("\n"); - } -} - - -static void ohci_dump_intr_mask (char *label, __u32 mask) -{ - dbg ("%s: 0x%08x%s%s%s%s%s%s%s%s%s", - label, - mask, - (mask & OHCI_INTR_MIE) ? " MIE" : "", - (mask & OHCI_INTR_OC) ? " OC" : "", - (mask & OHCI_INTR_RHSC) ? " RHSC" : "", - (mask & OHCI_INTR_FNO) ? " FNO" : "", - (mask & OHCI_INTR_UE) ? " UE" : "", - (mask & OHCI_INTR_RD) ? " RD" : "", - (mask & OHCI_INTR_SF) ? " SF" : "", - (mask & OHCI_INTR_WDH) ? " WDH" : "", - (mask & OHCI_INTR_SO) ? 
" SO" : "" - ); -} - -static void maybe_print_eds (char *label, __u32 value) -{ - if (value) - dbg ("%s %08x", label, value); -} - -static char *hcfs2string (int state) -{ - switch (state) { - case OHCI_USB_RESET: return "reset"; - case OHCI_USB_RESUME: return "resume"; - case OHCI_USB_OPER: return "operational"; - case OHCI_USB_SUSPEND: return "suspend"; - } - return "?"; -} - -// dump control and status registers -static void ohci_dump_status (ohci_t *controller) -{ - struct ohci_regs *regs = controller->regs; - __u32 temp; - - temp = readl (®s->revision) & 0xff; - if (temp != 0x10) - dbg ("spec %d.%d", (temp >> 4), (temp & 0x0f)); - - temp = readl (®s->control); - dbg ("control: 0x%08x%s%s%s HCFS=%s%s%s%s%s CBSR=%d", temp, - (temp & OHCI_CTRL_RWE) ? " RWE" : "", - (temp & OHCI_CTRL_RWC) ? " RWC" : "", - (temp & OHCI_CTRL_IR) ? " IR" : "", - hcfs2string (temp & OHCI_CTRL_HCFS), - (temp & OHCI_CTRL_BLE) ? " BLE" : "", - (temp & OHCI_CTRL_CLE) ? " CLE" : "", - (temp & OHCI_CTRL_IE) ? " IE" : "", - (temp & OHCI_CTRL_PLE) ? " PLE" : "", - temp & OHCI_CTRL_CBSR - ); - - temp = readl (®s->cmdstatus); - dbg ("cmdstatus: 0x%08x SOC=%d%s%s%s%s", temp, - (temp & OHCI_SOC) >> 16, - (temp & OHCI_OCR) ? " OCR" : "", - (temp & OHCI_BLF) ? " BLF" : "", - (temp & OHCI_CLF) ? " CLF" : "", - (temp & OHCI_HCR) ? 
" HCR" : "" - ); - - ohci_dump_intr_mask ("intrstatus", readl (®s->intrstatus)); - ohci_dump_intr_mask ("intrenable", readl (®s->intrenable)); - // intrdisable always same as intrenable - // ohci_dump_intr_mask ("intrdisable", readl (®s->intrdisable)); - - maybe_print_eds ("ed_periodcurrent", readl (®s->ed_periodcurrent)); - - maybe_print_eds ("ed_controlhead", readl (®s->ed_controlhead)); - maybe_print_eds ("ed_controlcurrent", readl (®s->ed_controlcurrent)); - - maybe_print_eds ("ed_bulkhead", readl (®s->ed_bulkhead)); - maybe_print_eds ("ed_bulkcurrent", readl (®s->ed_bulkcurrent)); - - maybe_print_eds ("donehead", readl (®s->donehead)); -} - -static void ohci_dump_roothub (ohci_t *controller, int verbose) -{ - __u32 temp, ndp, i; - - temp = roothub_a (controller); - ndp = (temp & RH_A_NDP); - - if (verbose) { - dbg ("roothub.a: %08x POTPGT=%d%s%s%s%s%s NDP=%d", temp, - ((temp & RH_A_POTPGT) >> 24) & 0xff, - (temp & RH_A_NOCP) ? " NOCP" : "", - (temp & RH_A_OCPM) ? " OCPM" : "", - (temp & RH_A_DT) ? " DT" : "", - (temp & RH_A_NPS) ? " NPS" : "", - (temp & RH_A_PSM) ? " PSM" : "", - ndp - ); - temp = roothub_b (controller); - dbg ("roothub.b: %08x PPCM=%04x DR=%04x", - temp, - (temp & RH_B_PPCM) >> 16, - (temp & RH_B_DR) - ); - temp = roothub_status (controller); - dbg ("roothub.status: %08x%s%s%s%s%s%s", - temp, - (temp & RH_HS_CRWE) ? " CRWE" : "", - (temp & RH_HS_OCIC) ? " OCIC" : "", - (temp & RH_HS_LPSC) ? " LPSC" : "", - (temp & RH_HS_DRWE) ? " DRWE" : "", - (temp & RH_HS_OCI) ? " OCI" : "", - (temp & RH_HS_LPS) ? " LPS" : "" - ); - } - - for (i = 0; i < ndp; i++) { - temp = roothub_portstatus (controller, i); - dbg ("roothub.portstatus [%d] = 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s", - i, - temp, - (temp & RH_PS_PRSC) ? " PRSC" : "", - (temp & RH_PS_OCIC) ? " OCIC" : "", - (temp & RH_PS_PSSC) ? " PSSC" : "", - (temp & RH_PS_PESC) ? " PESC" : "", - (temp & RH_PS_CSC) ? " CSC" : "", - - (temp & RH_PS_LSDA) ? " LSDA" : "", - (temp & RH_PS_PPS) ? 
" PPS" : "", - (temp & RH_PS_PRS) ? " PRS" : "", - (temp & RH_PS_POCI) ? " POCI" : "", - (temp & RH_PS_PSS) ? " PSS" : "", - - (temp & RH_PS_PES) ? " PES" : "", - (temp & RH_PS_CCS) ? " CCS" : "" - ); - } -} - -static void ohci_dump (ohci_t *controller, int verbose) -{ - dbg ("OHCI controller usb-%s state", controller->slot_name); - - // dumps some of the state we know about - ohci_dump_status (controller); - if (verbose) - ep_print_int_eds (controller, "hcca"); - dbg ("hcca frame #%04x", controller->hcca->frame_no); - ohci_dump_roothub (controller, 1); -} - - -#endif - -/*-------------------------------------------------------------------------* - * Interface functions (URB) - *-------------------------------------------------------------------------*/ - -/* return a request to the completion handler */ - -static int sohci_return_urb (struct ohci *hc, struct urb * urb) -{ - urb_priv_t * urb_priv = urb->hcpriv; - struct urb * urbt = NULL; - unsigned long flags; - int i; - - if (!urb_priv) - return -1; /* urb already unlinked */ - - /* just to be sure */ - if (!urb->complete) { - urb_rm_priv (urb); - return -1; - } - -#ifdef DEBUG - urb_print (urb, "RET", usb_pipeout (urb->pipe)); -#endif - - switch (usb_pipetype (urb->pipe)) { - case PIPE_INTERRUPT: - pci_unmap_single (hc->ohci_dev, - urb_priv->td [0]->data_dma, - urb->transfer_buffer_length, - usb_pipeout (urb->pipe) - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); - urb->complete (urb); - - /* implicitly requeued */ - urb->actual_length = 0; - urb->status = -EINPROGRESS; - td_submit_urb (urb); - break; - - case PIPE_ISOCHRONOUS: - // for (urbt = urb->next; urbt && (urbt != urb); urbt = urbt->next); - if (urbt) { /* send the reply and requeue URB */ - pci_unmap_single (hc->ohci_dev, - urb_priv->td [0]->data_dma, - urb->transfer_buffer_length, - usb_pipeout (urb->pipe) - ? 
PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE); - urb->complete (urb); - spin_lock_irqsave (&usb_ed_lock, flags); - urb->actual_length = 0; - urb->status = -EINPROGRESS; - urb->start_frame = urb_priv->ed->last_iso + 1; - if (urb_priv->state != URB_DEL) { - for (i = 0; i < urb->number_of_packets; i++) { - urb->iso_frame_desc[i].actual_length = 0; - urb->iso_frame_desc[i].status = -EXDEV; - } - td_submit_urb (urb); - } - spin_unlock_irqrestore (&usb_ed_lock, flags); - - } else { /* unlink URB, call complete */ - urb_rm_priv (urb); - urb->complete (urb); - } - break; - - case PIPE_BULK: - case PIPE_CONTROL: /* unlink URB, call complete */ - urb_rm_priv (urb); - urb->complete (urb); - break; - } - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* get a transfer request */ - -static int sohci_submit_urb (struct urb * urb, int mem_flags) -{ - ohci_t * ohci; - ed_t * ed; - urb_priv_t * urb_priv; - unsigned int pipe = urb->pipe; - int maxps = usb_maxpacket (urb->dev, pipe, usb_pipeout (pipe)); - int i, size = 0; - unsigned long flags; - int bustime = 0; - - if (!urb->dev || !urb->dev->bus) - return -ENODEV; - - if (urb->hcpriv) /* urb already in use */ - return -EINVAL; - -// if(usb_endpoint_halted (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe))) -// return -EPIPE; - - /* increment the reference count of the urb, as we now also control it */ - urb = usb_get_urb (urb); - - usb_get_dev (urb->dev); - ohci = (ohci_t *) urb->dev->bus->hcpriv; - -#ifdef DEBUG - urb_print (urb, "SUB", usb_pipein (pipe)); -#endif - - /* handle a request to the virtual root hub */ - if (usb_pipedevice (pipe) == ohci->rh.devnum) - return rh_submit_urb (urb); - - /* when controller's hung, permit only roothub cleanup attempts - * such as powering down ports */ - if (ohci->disabled) { - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -ESHUTDOWN; - } - - /* every endpoint has a ed, locate and fill it */ - if (!(ed = ep_add_ed (urb->dev, pipe, 
urb->interval, 1, mem_flags))) { - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -ENOMEM; - } - - /* for the private part of the URB we need the number of TDs (size) */ - switch (usb_pipetype (pipe)) { - case PIPE_BULK: /* one TD for every 4096 Byte */ - size = (urb->transfer_buffer_length - 1) / 4096 + 1; - - /* If the transfer size is multiple of the pipe mtu, - * we may need an extra TD to create a empty frame - * Jean II */ - if ((urb->transfer_flags & USB_ZERO_PACKET) && - usb_pipeout (pipe) && - (urb->transfer_buffer_length != 0) && - ((urb->transfer_buffer_length % maxps) == 0)) - size++; - break; - case PIPE_ISOCHRONOUS: /* number of packets from URB */ - size = urb->number_of_packets; - if (size <= 0) { - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -EINVAL; - } - for (i = 0; i < urb->number_of_packets; i++) { - urb->iso_frame_desc[i].actual_length = 0; - urb->iso_frame_desc[i].status = -EXDEV; - } - break; - case PIPE_CONTROL: /* 1 TD for setup, 1 for ACK and 1 for every 4096 B */ - size = (urb->transfer_buffer_length == 0)? 
2: - (urb->transfer_buffer_length - 1) / 4096 + 3; - break; - case PIPE_INTERRUPT: /* one TD */ - size = 1; - break; - } - - /* allocate the private part of the URB */ - urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (td_t *), mem_flags); - if (!urb_priv) { - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -ENOMEM; - } - memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (td_t *)); - - /* fill the private part of the URB */ - urb_priv->length = size; - urb_priv->ed = ed; - - /* allocate the TDs (updating hash chains) */ - spin_lock_irqsave (&usb_ed_lock, flags); - for (i = 0; i < size; i++) { - urb_priv->td[i] = td_alloc (ohci, SLAB_ATOMIC); - if (!urb_priv->td[i]) { - urb_priv->length = i; - urb_free_priv (ohci, urb_priv); - spin_unlock_irqrestore (&usb_ed_lock, flags); - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -ENOMEM; - } - } - - if (ed->state == ED_NEW || (ed->state & ED_DEL)) { - urb_free_priv (ohci, urb_priv); - spin_unlock_irqrestore (&usb_ed_lock, flags); - usb_put_dev (urb->dev); - usb_put_urb (urb); - return -EINVAL; - } - - /* allocate and claim bandwidth if needed; ISO - * needs start frame index if it was't provided. - */ - switch (usb_pipetype (pipe)) { - case PIPE_ISOCHRONOUS: - if (urb->transfer_flags & USB_ISO_ASAP) { - urb->start_frame = ((ed->state == ED_OPER) - ? 
(ed->last_iso + 1) - : (le16_to_cpu (ohci->hcca->frame_no) + 10)) & 0xffff; - } - /* FALLTHROUGH */ - case PIPE_INTERRUPT: - if (urb->bandwidth == 0) { - bustime = usb_check_bandwidth (urb->dev, urb); - } - if (bustime < 0) { - urb_free_priv (ohci, urb_priv); - spin_unlock_irqrestore (&usb_ed_lock, flags); - usb_put_dev (urb->dev); - usb_put_urb (urb); - return bustime; - } - usb_claim_bandwidth (urb->dev, urb, bustime, usb_pipeisoc (urb->pipe)); -#ifdef DO_TIMEOUTS - urb->timeout = 0; -#endif - } - - urb->actual_length = 0; - urb->hcpriv = urb_priv; - urb->status = -EINPROGRESS; - - /* link the ed into a chain if is not already */ - if (ed->state != ED_OPER) - ep_link (ohci, ed); - - /* fill the TDs and link it to the ed */ - td_submit_urb (urb); - -#ifdef DO_TIMEOUTS - /* maybe add to ordered list of timeouts */ - if (urb->timeout) { - struct list_head *entry; - - // FIXME: usb-uhci uses relative timeouts (like this), - // while uhci uses absolute ones (probably better). - // Pick one solution and change the affected drivers. 
- urb->timeout += jiffies; - - list_for_each (entry, &ohci->timeout_list) { - struct urb *next_urb; - - next_urb = list_entry (entry, struct urb, urb_list); - if (time_after_eq (urb->timeout, next_urb->timeout)) - break; - } - list_add (&urb->urb_list, entry); - - /* drive timeouts by SF (messy, but works) */ - writel (OHCI_INTR_SF, &ohci->regs->intrenable); - } -#endif - - spin_unlock_irqrestore (&usb_ed_lock, flags); - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* deactivate all TDs and remove the private part of the URB */ -/* interrupt callers must use async unlink mode */ - -static int sohci_unlink_urb (struct urb * urb) -{ - unsigned long flags; - ohci_t * ohci; - - if (!urb) /* just to be sure */ - return -EINVAL; - - if (!urb->dev || !urb->dev->bus) - return -ENODEV; - - ohci = (ohci_t *) urb->dev->bus->hcpriv; - -#ifdef DEBUG - urb_print (urb, "UNLINK", 1); -#endif - - /* handle a request to the virtual root hub */ - if (usb_pipedevice (urb->pipe) == ohci->rh.devnum) - return rh_unlink_urb (urb); - - if (urb->hcpriv && (urb->status == -EINPROGRESS)) { - if (!ohci->disabled) { - urb_priv_t * urb_priv; - - /* interrupt code may not sleep; it must use - * async status return to unlink pending urbs. 
- */ - if (!(urb->transfer_flags & USB_ASYNC_UNLINK) - && in_interrupt ()) { - err ("bug in call from %p; use async!", - __builtin_return_address(0)); - return -EWOULDBLOCK; - } - - /* flag the urb and its TDs for deletion in some - * upcoming SF interrupt delete list processing - */ - spin_lock_irqsave (&usb_ed_lock, flags); - urb_priv = urb->hcpriv; - - if (!urb_priv || (urb_priv->state == URB_DEL)) { - spin_unlock_irqrestore (&usb_ed_lock, flags); - return 0; - } - - urb_priv->state = URB_DEL; - ep_rm_ed (urb->dev, urb_priv->ed); - urb_priv->ed->state |= ED_URB_DEL; - - if (!(urb->transfer_flags & USB_ASYNC_UNLINK)) { - DECLARE_WAIT_QUEUE_HEAD (unlink_wakeup); - DECLARE_WAITQUEUE (wait, current); - int timeout = OHCI_UNLINK_TIMEOUT; - - add_wait_queue (&unlink_wakeup, &wait); - urb_priv->wait = &unlink_wakeup; - spin_unlock_irqrestore (&usb_ed_lock, flags); - - /* wait until all TDs are deleted */ - set_current_state(TASK_UNINTERRUPTIBLE); - while (timeout && (urb->status == -EINPROGRESS)) - timeout = schedule_timeout (timeout); - set_current_state(TASK_RUNNING); - remove_wait_queue (&unlink_wakeup, &wait); - if (urb->status == -EINPROGRESS) { - err ("unlink URB timeout"); - return -ETIMEDOUT; - } - } else { - /* usb_put_dev done in dl_del_list() */ - urb->status = -EINPROGRESS; - spin_unlock_irqrestore (&usb_ed_lock, flags); - return -EINPROGRESS; - } - } else { - urb_rm_priv (urb); - if (urb->transfer_flags & USB_ASYNC_UNLINK) { - urb->status = -ECONNRESET; - if (urb->complete) - urb->complete (urb); - } else - urb->status = -ENOENT; - } - } - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* allocate private data space for a usb device */ - -static int sohci_alloc_dev (struct usb_device *usb_dev) -{ - struct ohci_device * dev; - - dev = dev_alloc ((struct ohci *) usb_dev->bus->hcpriv, ALLOC_FLAGS); - if (!dev) - return -ENOMEM; - - usb_dev->hcpriv = dev; - return 0; -} - 
-/*-------------------------------------------------------------------------*/ - -/* may be called from interrupt context */ -/* frees private data space of usb device */ - -static int sohci_free_dev (struct usb_device * usb_dev) -{ - unsigned long flags; - int i, cnt = 0; - ed_t * ed; - struct ohci_device * dev = usb_to_ohci (usb_dev); - ohci_t * ohci = usb_dev->bus->hcpriv; - - if (!dev) - return 0; - - if (usb_dev->devnum >= 0) { - - /* driver disconnects should have unlinked all urbs - * (freeing all the TDs, unlinking EDs) but we need - * to defend against bugs that prevent that. - */ - spin_lock_irqsave (&usb_ed_lock, flags); - for(i = 0; i < NUM_EDS; i++) { - ed = &(dev->ed[i]); - if (ed->state != ED_NEW) { - if (ed->state == ED_OPER) { - /* driver on that interface didn't unlink an urb */ - dbg ("driver usb-%s dev %d ed 0x%x unfreed URB", - ohci->slot_name, usb_dev->devnum, i); - ep_unlink (ohci, ed); - } - ep_rm_ed (usb_dev, ed); - ed->state = ED_DEL; - cnt++; - } - } - spin_unlock_irqrestore (&usb_ed_lock, flags); - - /* if the controller is running, tds for those unlinked - * urbs get freed by dl_del_list at the next SF interrupt - */ - if (cnt > 0) { - - if (ohci->disabled) { - /* FIXME: Something like this should kick in, - * though it's currently an exotic case ... - * the controller won't ever be touching - * these lists again!! 
- dl_del_list (ohci, - le16_to_cpu (ohci->hcca->frame_no) & 1); - */ - warn ("TD leak, %d", cnt); - - } else if (!in_interrupt ()) { - DECLARE_WAIT_QUEUE_HEAD (freedev_wakeup); - DECLARE_WAITQUEUE (wait, current); - int timeout = OHCI_UNLINK_TIMEOUT; - - /* SF interrupt handler calls dl_del_list */ - add_wait_queue (&freedev_wakeup, &wait); - dev->wait = &freedev_wakeup; - set_current_state(TASK_UNINTERRUPTIBLE); - while (timeout && dev->ed_cnt) - timeout = schedule_timeout (timeout); - set_current_state(TASK_RUNNING); - remove_wait_queue (&freedev_wakeup, &wait); - if (dev->ed_cnt) { - err ("free device %d timeout", usb_dev->devnum); - return -ETIMEDOUT; - } - } else { - /* likely some interface's driver has a refcount bug */ - err ("bus %s devnum %d deletion in interrupt", - ohci->slot_name, usb_dev->devnum); - BUG (); - } - } - } - - /* free device, and associated EDs */ - dev_free (ohci, dev); - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* tell us the current USB frame number */ - -static int sohci_get_current_frame_number (struct usb_device *usb_dev) -{ - ohci_t * ohci = usb_dev->bus->hcpriv; - - return le16_to_cpu (ohci->hcca->frame_no); -} - -/*-------------------------------------------------------------------------*/ - -struct usb_operations sohci_device_operations = { - allocate: sohci_alloc_dev, - deallocate: sohci_free_dev, - get_frame_number: sohci_get_current_frame_number, - submit_urb: sohci_submit_urb, - unlink_urb: sohci_unlink_urb, -}; - -/*-------------------------------------------------------------------------* - * ED handling functions - *-------------------------------------------------------------------------*/ - -/* search for the right branch to insert an interrupt ed into the int tree - * do some load ballancing; - * returns the branch and - * sets the interval to interval = 2^integer (ld (interval)) */ - -static int ep_int_ballance (ohci_t * ohci, int interval, int load) -{ - int 
i, branch = 0; - - /* search for the least loaded interrupt endpoint branch of all 32 branches */ - for (i = 0; i < 32; i++) - if (ohci->ohci_int_load [branch] > ohci->ohci_int_load [i]) branch = i; - - branch = branch % interval; - for (i = branch; i < 32; i += interval) ohci->ohci_int_load [i] += load; - - return branch; -} - -/*-------------------------------------------------------------------------*/ - -/* 2^int( ld (inter)) */ - -static int ep_2_n_interval (int inter) -{ - int i; - for (i = 0; ((inter >> i) > 1 ) && (i < 5); i++); - return 1 << i; -} - -/*-------------------------------------------------------------------------*/ - -/* the int tree is a binary tree - * in order to process it sequentially the indexes of the branches have to be mapped - * the mapping reverses the bits of a word of num_bits length */ - -static int ep_rev (int num_bits, int word) -{ - int i, wout = 0; - - for (i = 0; i < num_bits; i++) wout |= (((word >> i) & 1) << (num_bits - i - 1)); - return wout; -} - -/*-------------------------------------------------------------------------*/ - -/* link an ed into one of the HC chains */ - -static int ep_link (ohci_t * ohci, ed_t * edi) -{ - int int_branch; - int i; - int inter; - int interval; - int load; - __u32 * ed_p; - volatile ed_t * ed = edi; - - ed->state = ED_OPER; - - switch (ed->type) { - case PIPE_CONTROL: - ed->hwNextED = 0; - if (ohci->ed_controltail == NULL) { - writel (ed->dma, &ohci->regs->ed_controlhead); - } else { - ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma); - } - ed->ed_prev = ohci->ed_controltail; - if (!ohci->ed_controltail && !ohci->ed_rm_list[0] && - !ohci->ed_rm_list[1] && !ohci->sleeping) { - ohci->hc_control |= OHCI_CTRL_CLE; - writel (ohci->hc_control, &ohci->regs->control); - } - ohci->ed_controltail = edi; - break; - - case PIPE_BULK: - ed->hwNextED = 0; - if (ohci->ed_bulktail == NULL) { - writel (ed->dma, &ohci->regs->ed_bulkhead); - } else { - ohci->ed_bulktail->hwNextED = cpu_to_le32 
(ed->dma); - } - ed->ed_prev = ohci->ed_bulktail; - if (!ohci->ed_bulktail && !ohci->ed_rm_list[0] && - !ohci->ed_rm_list[1] && !ohci->sleeping) { - ohci->hc_control |= OHCI_CTRL_BLE; - writel (ohci->hc_control, &ohci->regs->control); - } - ohci->ed_bulktail = edi; - break; - - case PIPE_INTERRUPT: - load = ed->int_load; - interval = ep_2_n_interval (ed->int_period); - ed->int_interval = interval; - int_branch = ep_int_ballance (ohci, interval, load); - ed->int_branch = int_branch; - - for (i = 0; i < ep_rev (6, interval); i += inter) { - inter = 1; - for (ed_p = &(ohci->hcca->int_table[ep_rev (5, i) + int_branch]); - (*ed_p != 0) && ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval >= interval); - ed_p = &((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED)) - inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval); - ed->hwNextED = *ed_p; - *ed_p = cpu_to_le32 (ed->dma); - } -#ifdef DEBUG - ep_print_int_eds (ohci, "LINK_INT"); -#endif - break; - - case PIPE_ISOCHRONOUS: - ed->hwNextED = 0; - ed->int_interval = 1; - if (ohci->ed_isotail != NULL) { - ohci->ed_isotail->hwNextED = cpu_to_le32 (ed->dma); - ed->ed_prev = ohci->ed_isotail; - } else { - for ( i = 0; i < 32; i += inter) { - inter = 1; - for (ed_p = &(ohci->hcca->int_table[ep_rev (5, i)]); - *ed_p != 0; - ed_p = &((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED)) - inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->int_interval); - *ed_p = cpu_to_le32 (ed->dma); - } - ed->ed_prev = NULL; - } - ohci->ed_isotail = edi; -#ifdef DEBUG - ep_print_int_eds (ohci, "LINK_ISO"); -#endif - break; - } - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* scan the periodic table to find and unlink this ED */ -static void periodic_unlink ( - struct ohci *ohci, - struct ed *ed, - unsigned index, - unsigned period -) { - for (; index < NUM_INTS; index += period) { - __u32 *ed_p = &ohci->hcca->int_table [index]; - - /* ED might have been 
unlinked through another path */ - while (*ed_p != 0) { - if ((dma_to_ed (ohci, le32_to_cpup (ed_p))) == ed) { - *ed_p = ed->hwNextED; - break; - } - ed_p = & ((dma_to_ed (ohci, - le32_to_cpup (ed_p)))->hwNextED); - } - } -} - -/* unlink an ed from one of the HC chains. - * just the link to the ed is unlinked. - * the link from the ed still points to another operational ed or 0 - * so the HC can eventually finish the processing of the unlinked ed */ - -static int ep_unlink (ohci_t * ohci, ed_t * ed) -{ - int i; - - ed->hwINFO |= cpu_to_le32 (OHCI_ED_SKIP); - - switch (ed->type) { - case PIPE_CONTROL: - if (ed->ed_prev == NULL) { - if (!ed->hwNextED) { - ohci->hc_control &= ~OHCI_CTRL_CLE; - writel (ohci->hc_control, &ohci->regs->control); - } - writel (le32_to_cpup (&ed->hwNextED), &ohci->regs->ed_controlhead); - } else { - ed->ed_prev->hwNextED = ed->hwNextED; - } - if (ohci->ed_controltail == ed) { - ohci->ed_controltail = ed->ed_prev; - } else { - (dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED)))->ed_prev = ed->ed_prev; - } - break; - - case PIPE_BULK: - if (ed->ed_prev == NULL) { - if (!ed->hwNextED) { - ohci->hc_control &= ~OHCI_CTRL_BLE; - writel (ohci->hc_control, &ohci->regs->control); - } - writel (le32_to_cpup (&ed->hwNextED), &ohci->regs->ed_bulkhead); - } else { - ed->ed_prev->hwNextED = ed->hwNextED; - } - if (ohci->ed_bulktail == ed) { - ohci->ed_bulktail = ed->ed_prev; - } else { - (dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED)))->ed_prev = ed->ed_prev; - } - break; - - case PIPE_INTERRUPT: - periodic_unlink (ohci, ed, 0, 1); - for (i = ed->int_branch; i < 32; i += ed->int_interval) - ohci->ohci_int_load[i] -= ed->int_load; -#ifdef DEBUG - ep_print_int_eds (ohci, "UNLINK_INT"); -#endif - break; - - case PIPE_ISOCHRONOUS: - if (ohci->ed_isotail == ed) - ohci->ed_isotail = ed->ed_prev; - if (ed->hwNextED != 0) - (dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED))) - ->ed_prev = ed->ed_prev; - - if (ed->ed_prev != NULL) - ed->ed_prev->hwNextED = 
ed->hwNextED; - else - periodic_unlink (ohci, ed, 0, 1); -#ifdef DEBUG - ep_print_int_eds (ohci, "UNLINK_ISO"); -#endif - break; - } - ed->state = ED_UNLINK; - return 0; -} - - -/*-------------------------------------------------------------------------*/ - -/* add/reinit an endpoint; this should be done once at the usb_set_configuration command, - * but the USB stack is a little bit stateless so we do it at every transaction - * if the state of the ed is ED_NEW then a dummy td is added and the state is changed to ED_UNLINK - * in all other cases the state is left unchanged - * the ed info fields are setted anyway even though most of them should not change */ - -static ed_t * ep_add_ed ( - struct usb_device * usb_dev, - unsigned int pipe, - int interval, - int load, - int mem_flags -) -{ - ohci_t * ohci = usb_dev->bus->hcpriv; - td_t * td; - ed_t * ed_ret; - volatile ed_t * ed; - unsigned long flags; - - - spin_lock_irqsave (&usb_ed_lock, flags); - - ed = ed_ret = &(usb_to_ohci (usb_dev)->ed[(usb_pipeendpoint (pipe) << 1) | - (usb_pipecontrol (pipe)? 0: usb_pipeout (pipe))]); - - if ((ed->state & ED_DEL) || (ed->state & ED_URB_DEL)) { - /* pending delete request */ - spin_unlock_irqrestore (&usb_ed_lock, flags); - return NULL; - } - - if (ed->state == ED_NEW) { - ed->hwINFO = cpu_to_le32 (OHCI_ED_SKIP); /* skip ed */ - /* dummy td; end of td list for ed */ - td = td_alloc (ohci, SLAB_ATOMIC); - /* hash the ed for later reverse mapping */ - if (!td || !hash_add_ed (ohci, (ed_t *)ed)) { - /* out of memory */ - if (td) - td_free(ohci, td); - spin_unlock_irqrestore (&usb_ed_lock, flags); - return NULL; - } - ed->hwTailP = cpu_to_le32 (td->td_dma); - ed->hwHeadP = ed->hwTailP; - ed->state = ED_UNLINK; - ed->type = usb_pipetype (pipe); - usb_to_ohci (usb_dev)->ed_cnt++; - } - - ohci->dev[usb_pipedevice (pipe)] = usb_dev; - - ed->hwINFO = cpu_to_le32 (usb_pipedevice (pipe) - | usb_pipeendpoint (pipe) << 7 - | (usb_pipeisoc (pipe)? 0x8000: 0) - | (usb_pipecontrol (pipe)? 
0: (usb_pipeout (pipe)? 0x800: 0x1000)) - | (usb_dev->speed == USB_SPEED_LOW) << 13 - | usb_maxpacket (usb_dev, pipe, usb_pipeout (pipe)) << 16); - - if (ed->type == PIPE_INTERRUPT && ed->state == ED_UNLINK) { - ed->int_period = interval; - ed->int_load = load; - } - - spin_unlock_irqrestore (&usb_ed_lock, flags); - return ed_ret; -} - -/*-------------------------------------------------------------------------*/ - -/* request the removal of an endpoint - * put the ep on the rm_list and request a stop of the bulk or ctrl list - * real removal is done at the next start frame (SF) hardware interrupt */ - -static void ep_rm_ed (struct usb_device * usb_dev, ed_t * ed) -{ - unsigned int frame; - ohci_t * ohci = usb_dev->bus->hcpriv; - - if ((ed->state & ED_DEL) || (ed->state & ED_URB_DEL)) - return; - - ed->hwINFO |= cpu_to_le32 (OHCI_ED_SKIP); - - if (!ohci->disabled) { - switch (ed->type) { - case PIPE_CONTROL: /* stop control list */ - ohci->hc_control &= ~OHCI_CTRL_CLE; - writel (ohci->hc_control, &ohci->regs->control); - break; - case PIPE_BULK: /* stop bulk list */ - ohci->hc_control &= ~OHCI_CTRL_BLE; - writel (ohci->hc_control, &ohci->regs->control); - break; - } - } - - frame = le16_to_cpu (ohci->hcca->frame_no) & 0x1; - ed->ed_rm_list = ohci->ed_rm_list[frame]; - ohci->ed_rm_list[frame] = ed; - - if (!ohci->disabled && !ohci->sleeping) { - /* enable SOF interrupt */ - writel (OHCI_INTR_SF, &ohci->regs->intrstatus); - writel (OHCI_INTR_SF, &ohci->regs->intrenable); - } -} - -/*-------------------------------------------------------------------------* - * TD handling functions - *-------------------------------------------------------------------------*/ - -/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */ - -static void -td_fill (ohci_t * ohci, unsigned int info, - dma_addr_t data, int len, - struct urb * urb, int index) -{ - volatile td_t * td, * td_pt; - urb_priv_t * urb_priv = urb->hcpriv; - - if (index >= urb_priv->length) { - err("internal OHCI error: 
TD index > length"); - return; - } - - /* use this td as the next dummy */ - td_pt = urb_priv->td [index]; - td_pt->hwNextTD = 0; - - /* fill the old dummy TD */ - td = urb_priv->td [index] = dma_to_td (ohci, - le32_to_cpup (&urb_priv->ed->hwTailP) & ~0xf); - - td->ed = urb_priv->ed; - td->next_dl_td = NULL; - td->index = index; - td->urb = urb; - td->data_dma = data; - if (!len) - data = 0; - - td->hwINFO = cpu_to_le32 (info); - if ((td->ed->type) == PIPE_ISOCHRONOUS) { - td->hwCBP = cpu_to_le32 (data & 0xFFFFF000); - td->ed->last_iso = info & 0xffff; - } else { - td->hwCBP = cpu_to_le32 (data); - } - if (data) - td->hwBE = cpu_to_le32 (data + len - 1); - else - td->hwBE = 0; - td->hwNextTD = cpu_to_le32 (td_pt->td_dma); - td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000); - - /* append to queue */ - td->ed->hwTailP = td->hwNextTD; -} - -/*-------------------------------------------------------------------------*/ - -/* prepare all TDs of a transfer */ - -static void td_submit_urb (struct urb * urb) -{ - urb_priv_t * urb_priv = urb->hcpriv; - ohci_t * ohci = (ohci_t *) urb->dev->bus->hcpriv; - dma_addr_t data; - int data_len = urb->transfer_buffer_length; - int maxps = usb_maxpacket (urb->dev, urb->pipe, usb_pipeout (urb->pipe)); - int cnt = 0; - __u32 info = 0; - unsigned int toggle = 0; - - /* OHCI handles the DATA-toggles itself, we just use the USB-toggle bits for reseting */ - if(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe))) { - toggle = TD_T_TOGGLE; - } else { - toggle = TD_T_DATA0; - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), 1); - } - - urb_priv->td_cnt = 0; - - if (data_len) { - data = pci_map_single (ohci->ohci_dev, - urb->transfer_buffer, data_len, - usb_pipeout (urb->pipe) - ? PCI_DMA_TODEVICE - : PCI_DMA_FROMDEVICE - ); - } else - data = 0; - - switch (usb_pipetype (urb->pipe)) { - case PIPE_BULK: - info = usb_pipeout (urb->pipe)? 
- TD_CC | TD_DP_OUT : TD_CC | TD_DP_IN ; - while(data_len > 4096) { - td_fill (ohci, info | (cnt? TD_T_TOGGLE:toggle), data, 4096, urb, cnt); - data += 4096; data_len -= 4096; cnt++; - } - info = usb_pipeout (urb->pipe)? - TD_CC | TD_DP_OUT : TD_CC | TD_R | TD_DP_IN ; - td_fill (ohci, info | (cnt? TD_T_TOGGLE:toggle), data, data_len, urb, cnt); - cnt++; - - /* If the transfer size is multiple of the pipe mtu, - * we may need an extra TD to create a empty frame - * Note : another way to check this condition is - * to test if(urb_priv->length > cnt) - Jean II */ - if ((urb->transfer_flags & USB_ZERO_PACKET) && - usb_pipeout (urb->pipe) && - (urb->transfer_buffer_length != 0) && - ((urb->transfer_buffer_length % maxps) == 0)) { - td_fill (ohci, info | (cnt? TD_T_TOGGLE:toggle), 0, 0, urb, cnt); - cnt++; - } - - if (!ohci->sleeping) - writel (OHCI_BLF, &ohci->regs->cmdstatus); /* start bulk list */ - break; - - case PIPE_INTERRUPT: - info = usb_pipeout (urb->pipe)? - TD_CC | TD_DP_OUT | toggle: TD_CC | TD_R | TD_DP_IN | toggle; - td_fill (ohci, info, data, data_len, urb, cnt++); - break; - - case PIPE_CONTROL: - info = TD_CC | TD_DP_SETUP | TD_T_DATA0; - td_fill (ohci, info, - pci_map_single (ohci->ohci_dev, - urb->setup_packet, 8, - PCI_DMA_TODEVICE), - 8, urb, cnt++); - if (data_len > 0) { - info = usb_pipeout (urb->pipe)? - TD_CC | TD_R | TD_DP_OUT | TD_T_DATA1 : TD_CC | TD_R | TD_DP_IN | TD_T_DATA1; - /* NOTE: mishandles transfers >8K, some >4K */ - td_fill (ohci, info, data, data_len, urb, cnt++); - } - info = usb_pipeout (urb->pipe)? 
- TD_CC | TD_DP_IN | TD_T_DATA1: TD_CC | TD_DP_OUT | TD_T_DATA1; - td_fill (ohci, info, data, 0, urb, cnt++); - if (!ohci->sleeping) - writel (OHCI_CLF, &ohci->regs->cmdstatus); /* start Control list */ - break; - - case PIPE_ISOCHRONOUS: - for (cnt = 0; cnt < urb->number_of_packets; cnt++) { - td_fill (ohci, TD_CC|TD_ISO | ((urb->start_frame + cnt) & 0xffff), - data + urb->iso_frame_desc[cnt].offset, - urb->iso_frame_desc[cnt].length, urb, cnt); - } - break; - } - if (urb_priv->length != cnt) - dbg("TD LENGTH %d != CNT %d", urb_priv->length, cnt); -} - -/*-------------------------------------------------------------------------* - * Done List handling functions - *-------------------------------------------------------------------------*/ - - -/* calculate the transfer length and update the urb */ - -static void dl_transfer_length(td_t * td) -{ - __u32 tdINFO, tdBE, tdCBP; - __u16 tdPSW; - struct urb * urb = td->urb; - urb_priv_t * urb_priv = urb->hcpriv; - int dlen = 0; - int cc = 0; - - tdINFO = le32_to_cpup (&td->hwINFO); - tdBE = le32_to_cpup (&td->hwBE); - tdCBP = le32_to_cpup (&td->hwCBP); - - - if (tdINFO & TD_ISO) { - tdPSW = le16_to_cpu (td->hwPSW[0]); - cc = (tdPSW >> 12) & 0xF; - if (cc < 0xE) { - if (usb_pipeout(urb->pipe)) { - dlen = urb->iso_frame_desc[td->index].length; - } else { - dlen = tdPSW & 0x3ff; - } - urb->actual_length += dlen; - urb->iso_frame_desc[td->index].actual_length = dlen; - if (!(urb->transfer_flags & USB_DISABLE_SPD) && (cc == TD_DATAUNDERRUN)) - cc = TD_CC_NOERROR; - - urb->iso_frame_desc[td->index].status = cc_to_error[cc]; - } - } else { /* BULK, INT, CONTROL DATA */ - if (!(usb_pipetype (urb->pipe) == PIPE_CONTROL && - ((td->index == 0) || (td->index == urb_priv->length - 1)))) { - if (tdBE != 0) { - if (td->hwCBP == 0) - urb->actual_length += tdBE - td->data_dma + 1; - else - urb->actual_length += tdCBP - td->data_dma; - } - } - } -} - -/* handle an urb that is being unlinked */ - -static void dl_del_urb (struct urb * urb) 
-{ - wait_queue_head_t * wait_head = ((urb_priv_t *)(urb->hcpriv))->wait; - - urb_rm_priv_locked (urb); - - if (urb->transfer_flags & USB_ASYNC_UNLINK) { - urb->status = -ECONNRESET; - if (urb->complete) - urb->complete (urb); - } else { - urb->status = -ENOENT; - - /* unblock sohci_unlink_urb */ - if (wait_head) - wake_up (wait_head); - } -} - -/*-------------------------------------------------------------------------*/ - -/* replies to the request have to be on a FIFO basis so - * we reverse the reversed done-list */ - -td_t * dl_reverse_done_list (ohci_t * ohci) -{ - __u32 td_list_hc; - td_t * td_rev = NULL; - td_t * td_list = NULL; - urb_priv_t * urb_priv = NULL; - unsigned long flags; - - spin_lock_irqsave (&usb_ed_lock, flags); - - td_list_hc = le32_to_cpup (&ohci->hcca->done_head) & 0xfffffff0; - ohci->hcca->done_head = 0; - - while (td_list_hc) { - td_list = dma_to_td (ohci, td_list_hc); - - if (TD_CC_GET (le32_to_cpup (&td_list->hwINFO))) { - urb_priv = (urb_priv_t *) td_list->urb->hcpriv; - dbg(" USB-error/status: %x : %p", - TD_CC_GET (le32_to_cpup (&td_list->hwINFO)), td_list); - if (td_list->ed->hwHeadP & cpu_to_le32 (0x1)) { - if (urb_priv && ((td_list->index + 1) < urb_priv->length)) { - td_list->ed->hwHeadP = - (urb_priv->td[urb_priv->length - 1]->hwNextTD & cpu_to_le32 (0xfffffff0)) | - (td_list->ed->hwHeadP & cpu_to_le32 (0x2)); - urb_priv->td_cnt += urb_priv->length - td_list->index - 1; - } else - td_list->ed->hwHeadP &= cpu_to_le32 (0xfffffff2); - } - } - - td_list->next_dl_td = td_rev; - td_rev = td_list; - td_list_hc = le32_to_cpup (&td_list->hwNextTD) & 0xfffffff0; - } - spin_unlock_irqrestore (&usb_ed_lock, flags); - return td_list; -} - -/*-------------------------------------------------------------------------*/ - -/* there are some pending requests to remove - * - some of the eds (if ed->state & ED_DEL (set by sohci_free_dev) - * - some URBs/TDs if urb_priv->state == URB_DEL */ - -static void dl_del_list (ohci_t * ohci, unsigned int 
frame) -{ - unsigned long flags; - ed_t * ed; - __u32 edINFO; - __u32 tdINFO; - td_t * td = NULL, * td_next = NULL, * tdHeadP = NULL, * tdTailP; - __u32 * td_p; - int ctrl = 0, bulk = 0; - - spin_lock_irqsave (&usb_ed_lock, flags); - - for (ed = ohci->ed_rm_list[frame]; ed != NULL; ed = ed->ed_rm_list) { - - tdTailP = dma_to_td (ohci, le32_to_cpup (&ed->hwTailP) & 0xfffffff0); - tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP) & 0xfffffff0); - edINFO = le32_to_cpup (&ed->hwINFO); - td_p = &ed->hwHeadP; - - for (td = tdHeadP; td != tdTailP; td = td_next) { - struct urb * urb = td->urb; - urb_priv_t * urb_priv = td->urb->hcpriv; - - td_next = dma_to_td (ohci, le32_to_cpup (&td->hwNextTD) & 0xfffffff0); - if ((urb_priv->state == URB_DEL) || (ed->state & ED_DEL)) { - tdINFO = le32_to_cpup (&td->hwINFO); - if (TD_CC_GET (tdINFO) < 0xE) - dl_transfer_length (td); - *td_p = td->hwNextTD | (*td_p & cpu_to_le32 (0x3)); - - /* URB is done; clean up */ - if (++(urb_priv->td_cnt) == urb_priv->length) - dl_del_urb (urb); - } else { - td_p = &td->hwNextTD; - } - } - - if (ed->state & ED_DEL) { /* set by sohci_free_dev */ - struct ohci_device * dev = usb_to_ohci (ohci->dev[edINFO & 0x7F]); - td_free (ohci, tdTailP); /* free dummy td */ - ed->hwINFO = cpu_to_le32 (OHCI_ED_SKIP); - ed->state = ED_NEW; - hash_free_ed(ohci, ed); - /* if all eds are removed wake up sohci_free_dev */ - if (!--dev->ed_cnt) { - wait_queue_head_t *wait_head = dev->wait; - - dev->wait = 0; - if (wait_head) - wake_up (wait_head); - } - } else { - ed->state &= ~ED_URB_DEL; - tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP) & 0xfffffff0); - - if (tdHeadP == tdTailP) { - if (ed->state == ED_OPER) - ep_unlink(ohci, ed); - td_free (ohci, tdTailP); - ed->hwINFO = cpu_to_le32 (OHCI_ED_SKIP); - ed->state = ED_NEW; - hash_free_ed(ohci, ed); - --(usb_to_ohci (ohci->dev[edINFO & 0x7F]))->ed_cnt; - } else - ed->hwINFO &= ~cpu_to_le32 (OHCI_ED_SKIP); - } - - switch (ed->type) { - case PIPE_CONTROL: - ctrl = 
1; - break; - case PIPE_BULK: - bulk = 1; - break; - } - } - - /* maybe reenable control and bulk lists */ - if (!ohci->disabled) { - if (ctrl) /* reset control list */ - writel (0, &ohci->regs->ed_controlcurrent); - if (bulk) /* reset bulk list */ - writel (0, &ohci->regs->ed_bulkcurrent); - if (!ohci->ed_rm_list[!frame] && !ohci->sleeping) { - if (ohci->ed_controltail) - ohci->hc_control |= OHCI_CTRL_CLE; - if (ohci->ed_bulktail) - ohci->hc_control |= OHCI_CTRL_BLE; - writel (ohci->hc_control, &ohci->regs->control); - } - } - - ohci->ed_rm_list[frame] = NULL; - spin_unlock_irqrestore (&usb_ed_lock, flags); -} - - - -/*-------------------------------------------------------------------------*/ - -/* td done list */ - -void dl_done_list (ohci_t * ohci, td_t * td_list) -{ - td_t * td_list_next = NULL; - ed_t * ed; - int cc = 0; - struct urb * urb; - urb_priv_t * urb_priv; - __u32 tdINFO, edHeadP, edTailP; - - unsigned long flags; - - while (td_list) { - td_list_next = td_list->next_dl_td; - - urb = td_list->urb; - urb_priv = urb->hcpriv; - tdINFO = le32_to_cpup (&td_list->hwINFO); - - ed = td_list->ed; - - dl_transfer_length(td_list); - - /* error code of transfer */ - cc = TD_CC_GET (tdINFO); - if (cc == TD_CC_STALL) - usb_endpoint_halt(urb->dev, - usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)); - - if (!(urb->transfer_flags & USB_DISABLE_SPD) - && (cc == TD_DATAUNDERRUN)) - cc = TD_CC_NOERROR; - - if (++(urb_priv->td_cnt) == urb_priv->length) { - if ((ed->state & (ED_OPER | ED_UNLINK)) - && (urb_priv->state != URB_DEL)) { - urb->status = cc_to_error[cc]; - sohci_return_urb (ohci, urb); - } else { - spin_lock_irqsave (&usb_ed_lock, flags); - dl_del_urb (urb); - spin_unlock_irqrestore (&usb_ed_lock, flags); - } - } - - spin_lock_irqsave (&usb_ed_lock, flags); - if (ed->state != ED_NEW) { - edHeadP = le32_to_cpup (&ed->hwHeadP) & 0xfffffff0; - edTailP = le32_to_cpup (&ed->hwTailP); - - /* unlink eds if they are not busy */ - if ((edHeadP == edTailP) && 
(ed->state == ED_OPER)) - ep_unlink (ohci, ed); - } - spin_unlock_irqrestore (&usb_ed_lock, flags); - - td_list = td_list_next; - } -} - - - - -/*-------------------------------------------------------------------------* - * Virtual Root Hub - *-------------------------------------------------------------------------*/ - -/* Device descriptor */ -static __u8 root_hub_dev_des[] = -{ - 0x12, /* __u8 bLength; */ - 0x01, /* __u8 bDescriptorType; Device */ - 0x10, /* __u16 bcdUSB; v1.1 */ - 0x01, - 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */ - 0x00, /* __u8 bDeviceSubClass; */ - 0x00, /* __u8 bDeviceProtocol; */ - 0x08, /* __u8 bMaxPacketSize0; 8 Bytes */ - 0x00, /* __u16 idVendor; */ - 0x00, - 0x00, /* __u16 idProduct; */ - 0x00, - 0x00, /* __u16 bcdDevice; */ - 0x00, - 0x00, /* __u8 iManufacturer; */ - 0x02, /* __u8 iProduct; */ - 0x01, /* __u8 iSerialNumber; */ - 0x01 /* __u8 bNumConfigurations; */ -}; - - -/* Configuration descriptor */ -static __u8 root_hub_config_des[] = -{ - 0x09, /* __u8 bLength; */ - 0x02, /* __u8 bDescriptorType; Configuration */ - 0x19, /* __u16 wTotalLength; */ - 0x00, - 0x01, /* __u8 bNumInterfaces; */ - 0x01, /* __u8 bConfigurationValue; */ - 0x00, /* __u8 iConfiguration; */ - 0x40, /* __u8 bmAttributes; - Bit 7: Bus-powered, 6: Self-powered, 5 Remote-wakwup, 4..0: resvd */ - 0x00, /* __u8 MaxPower; */ - - /* interface */ - 0x09, /* __u8 if_bLength; */ - 0x04, /* __u8 if_bDescriptorType; Interface */ - 0x00, /* __u8 if_bInterfaceNumber; */ - 0x00, /* __u8 if_bAlternateSetting; */ - 0x01, /* __u8 if_bNumEndpoints; */ - 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */ - 0x00, /* __u8 if_bInterfaceSubClass; */ - 0x00, /* __u8 if_bInterfaceProtocol; */ - 0x00, /* __u8 if_iInterface; */ - - /* endpoint */ - 0x07, /* __u8 ep_bLength; */ - 0x05, /* __u8 ep_bDescriptorType; Endpoint */ - 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */ - 0x03, /* __u8 ep_bmAttributes; Interrupt */ - 0x02, /* __u16 ep_wMaxPacketSize; ((MAX_ROOT_PORTS + 1) / 8 
*/ - 0x00, - 0xff /* __u8 ep_bInterval; 255 ms */ -}; - -/* Hub class-specific descriptor is constructed dynamically */ - - -/*-------------------------------------------------------------------------*/ - -/* prepare Interrupt pipe data; HUB INTERRUPT ENDPOINT */ - -static int rh_send_irq (ohci_t * ohci, void * rh_data, int rh_len) -{ - int num_ports; - int i; - int ret; - int len; - - __u8 data[8]; - - num_ports = roothub_a (ohci) & RH_A_NDP; - if (num_ports > MAX_ROOT_PORTS) { - err ("bogus NDP=%d for OHCI usb-%s", num_ports, - ohci->slot_name); - err ("rereads as NDP=%d", - readl (&ohci->regs->roothub.a) & RH_A_NDP); - /* retry later; "should not happen" */ - return 0; - } - *(__u8 *) data = (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC)) - ? 1: 0; - ret = *(__u8 *) data; - - for ( i = 0; i < num_ports; i++) { - *(__u8 *) (data + (i + 1) / 8) |= - ((roothub_portstatus (ohci, i) & - (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) - ? 1: 0) << ((i + 1) % 8); - ret += *(__u8 *) (data + (i + 1) / 8); - } - len = i/8 + 1; - - if (ret > 0) { - memcpy(rh_data, data, - min_t(unsigned int, len, - min_t(unsigned int, rh_len, sizeof(data)))); - return len; - } - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* Virtual Root Hub INTs are polled by this timer every "interval" ms */ - -static void rh_int_timer_do (unsigned long ptr) -{ - int len; - - struct urb * urb = (struct urb *) ptr; - ohci_t * ohci = urb->dev->bus->hcpriv; - - if (ohci->disabled) - return; - - /* ignore timers firing during PM suspend, etc */ - if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER) - goto out; - - if(ohci->rh.send) { - len = rh_send_irq (ohci, urb->transfer_buffer, urb->transfer_buffer_length); - if (len > 0) { - urb->actual_length = len; -#ifdef DEBUG - urb_print (urb, "RET-t(rh)", usb_pipeout (urb->pipe)); -#endif - if (urb->complete) - urb->complete (urb); - } - } - out: - rh_init_int_timer (urb); -} - 
-/*-------------------------------------------------------------------------*/ - -/* Root Hub INTs are polled by this timer */ - -static int rh_init_int_timer (struct urb * urb) -{ - ohci_t * ohci = urb->dev->bus->hcpriv; - - ohci->rh.interval = urb->interval; - init_timer (&ohci->rh.rh_int_timer); - ohci->rh.rh_int_timer.function = rh_int_timer_do; - ohci->rh.rh_int_timer.data = (unsigned long) urb; - ohci->rh.rh_int_timer.expires = - jiffies + (HZ * (urb->interval < 30? 30: urb->interval)) / 1000; - add_timer (&ohci->rh.rh_int_timer); - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -#define OK(x) len = (x); break -#define WR_RH_STAT(x) writel((x), &ohci->regs->roothub.status) -#define WR_RH_PORTSTAT(x) writel((x), &ohci->regs->roothub.portstatus[wIndex-1]) -#define RD_RH_STAT roothub_status(ohci) -#define RD_RH_PORTSTAT roothub_portstatus(ohci,wIndex-1) - -/* request to virtual root hub */ - -static int rh_submit_urb (struct urb * urb) -{ - struct usb_device * usb_dev = urb->dev; - ohci_t * ohci = usb_dev->bus->hcpriv; - unsigned int pipe = urb->pipe; - struct usb_ctrlrequest * cmd = (struct usb_ctrlrequest *) urb->setup_packet; - void * data = urb->transfer_buffer; - int leni = urb->transfer_buffer_length; - int len = 0; - int status = TD_CC_NOERROR; - - __u32 datab[4]; - __u8 * data_buf = (__u8 *) datab; - - __u16 bmRType_bReq; - __u16 wValue; - __u16 wIndex; - __u16 wLength; - - if (usb_pipeint(pipe)) { - ohci->rh.urb = urb; - ohci->rh.send = 1; - ohci->rh.interval = urb->interval; - rh_init_int_timer(urb); - urb->status = cc_to_error [TD_CC_NOERROR]; - - return 0; - } - - bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8); - wValue = le16_to_cpu (cmd->wValue); - wIndex = le16_to_cpu (cmd->wIndex); - wLength = le16_to_cpu (cmd->wLength); - - switch (bmRType_bReq) { - /* Request Destination: - without flags: Device, - RH_INTERFACE: interface, - RH_ENDPOINT: endpoint, - RH_CLASS means HUB here, - RH_OTHER 
| RH_CLASS almost ever means HUB_PORT here - */ - - case RH_GET_STATUS: - *(__u16 *) data_buf = cpu_to_le16 (1); OK (2); - case RH_GET_STATUS | RH_INTERFACE: - *(__u16 *) data_buf = cpu_to_le16 (0); OK (2); - case RH_GET_STATUS | RH_ENDPOINT: - *(__u16 *) data_buf = cpu_to_le16 (0); OK (2); - case RH_GET_STATUS | RH_CLASS: - *(__u32 *) data_buf = cpu_to_le32 ( - RD_RH_STAT & ~(RH_HS_CRWE | RH_HS_DRWE)); - OK (4); - case RH_GET_STATUS | RH_OTHER | RH_CLASS: - *(__u32 *) data_buf = cpu_to_le32 (RD_RH_PORTSTAT); OK (4); - - case RH_CLEAR_FEATURE | RH_ENDPOINT: - switch (wValue) { - case (RH_ENDPOINT_STALL): OK (0); - } - break; - - case RH_CLEAR_FEATURE | RH_CLASS: - switch (wValue) { - case RH_C_HUB_LOCAL_POWER: - OK(0); - case (RH_C_HUB_OVER_CURRENT): - WR_RH_STAT(RH_HS_OCIC); OK (0); - } - break; - - case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS: - switch (wValue) { - case (RH_PORT_ENABLE): - WR_RH_PORTSTAT (RH_PS_CCS ); OK (0); - case (RH_PORT_SUSPEND): - WR_RH_PORTSTAT (RH_PS_POCI); OK (0); - case (RH_PORT_POWER): - WR_RH_PORTSTAT (RH_PS_LSDA); OK (0); - case (RH_C_PORT_CONNECTION): - WR_RH_PORTSTAT (RH_PS_CSC ); OK (0); - case (RH_C_PORT_ENABLE): - WR_RH_PORTSTAT (RH_PS_PESC); OK (0); - case (RH_C_PORT_SUSPEND): - WR_RH_PORTSTAT (RH_PS_PSSC); OK (0); - case (RH_C_PORT_OVER_CURRENT): - WR_RH_PORTSTAT (RH_PS_OCIC); OK (0); - case (RH_C_PORT_RESET): - WR_RH_PORTSTAT (RH_PS_PRSC); OK (0); - } - break; - - case RH_SET_FEATURE | RH_OTHER | RH_CLASS: - switch (wValue) { - case (RH_PORT_SUSPEND): - WR_RH_PORTSTAT (RH_PS_PSS ); OK (0); - case (RH_PORT_RESET): /* BUG IN HUP CODE *********/ - if (RD_RH_PORTSTAT & RH_PS_CCS) - WR_RH_PORTSTAT (RH_PS_PRS); - OK (0); - case (RH_PORT_POWER): - WR_RH_PORTSTAT (RH_PS_PPS ); OK (0); - case (RH_PORT_ENABLE): /* BUG IN HUP CODE *********/ - if (RD_RH_PORTSTAT & RH_PS_CCS) - WR_RH_PORTSTAT (RH_PS_PES ); - OK (0); - } - break; - - case RH_SET_ADDRESS: ohci->rh.devnum = wValue; OK(0); - - case RH_GET_DESCRIPTOR: - switch ((wValue & 
0xff00) >> 8) { - case (0x01): /* device descriptor */ - len = min_t(unsigned int, - leni, - min_t(unsigned int, - sizeof (root_hub_dev_des), - wLength)); - data_buf = root_hub_dev_des; OK(len); - case (0x02): /* configuration descriptor */ - len = min_t(unsigned int, - leni, - min_t(unsigned int, - sizeof (root_hub_config_des), - wLength)); - data_buf = root_hub_config_des; OK(len); - case (0x03): /* string descriptors */ - len = usb_root_hub_string (wValue & 0xff, - (int)(long) ohci->regs, "OHCI", - data, wLength); - if (len > 0) { - data_buf = data; - OK(min_t(int, leni, len)); - } - // else fallthrough - default: - status = TD_CC_STALL; - } - break; - - case RH_GET_DESCRIPTOR | RH_CLASS: - { - __u32 temp = roothub_a (ohci); - - data_buf [0] = 9; // min length; - data_buf [1] = 0x29; - data_buf [2] = temp & RH_A_NDP; - data_buf [3] = 0; - if (temp & RH_A_PSM) /* per-port power switching? */ - data_buf [3] |= 0x1; - if (temp & RH_A_NOCP) /* no overcurrent reporting? */ - data_buf [3] |= 0x10; - else if (temp & RH_A_OCPM) /* per-port overcurrent reporting? 
*/ - data_buf [3] |= 0x8; - - datab [1] = 0; - data_buf [5] = (temp & RH_A_POTPGT) >> 24; - temp = roothub_b (ohci); - data_buf [7] = temp & RH_B_DR; - if (data_buf [2] < 7) { - data_buf [8] = 0xff; - } else { - data_buf [0] += 2; - data_buf [8] = (temp & RH_B_DR) >> 8; - data_buf [10] = data_buf [9] = 0xff; - } - - len = min_t(unsigned int, leni, - min_t(unsigned int, data_buf [0], wLength)); - OK (len); - } - - case RH_GET_CONFIGURATION: *(__u8 *) data_buf = 0x01; OK (1); - - case RH_SET_CONFIGURATION: WR_RH_STAT (0x10000); OK (0); - - default: - dbg ("unsupported root hub command"); - status = TD_CC_STALL; - } - -#ifdef DEBUG - // ohci_dump_roothub (ohci, 0); -#endif - - len = min_t(int, len, leni); - if (data != data_buf) - memcpy (data, data_buf, len); - urb->actual_length = len; - urb->status = cc_to_error [status]; - -#ifdef DEBUG - urb_print (urb, "RET(rh)", usb_pipeout (urb->pipe)); -#endif - - urb->hcpriv = NULL; - usb_put_dev (usb_dev); - urb->dev = NULL; - if (urb->complete) - urb->complete (urb); - usb_put_urb (urb); - return 0; -} - -/*-------------------------------------------------------------------------*/ - -static int rh_unlink_urb (struct urb * urb) -{ - ohci_t * ohci = urb->dev->bus->hcpriv; - - if (ohci->rh.urb == urb) { - ohci->rh.send = 0; - del_timer (&ohci->rh.rh_int_timer); - ohci->rh.urb = NULL; - - urb->hcpriv = NULL; - usb_put_dev (urb->dev); - urb->dev = NULL; - if (urb->transfer_flags & USB_ASYNC_UNLINK) { - urb->status = -ECONNRESET; - if (urb->complete) - urb->complete (urb); - } else - urb->status = -ENOENT; - usb_put_urb (urb); - } - return 0; -} - -/*-------------------------------------------------------------------------* - * HC functions - *-------------------------------------------------------------------------*/ - -/* reset the HC and BUS */ - -int hc_reset (ohci_t * ohci) -{ - int timeout = 30; - int smm_timeout = 50; /* 0,5 sec */ - - if (readl (&ohci->regs->control) & OHCI_CTRL_IR) { /* SMM owns the HC */ - writel 
(OHCI_OCR, &ohci->regs->cmdstatus); /* request ownership */ - dbg("USB HC TakeOver from SMM"); - while (readl (&ohci->regs->control) & OHCI_CTRL_IR) { - wait_ms (10); - if (--smm_timeout == 0) { - err("USB HC TakeOver failed!"); - return -1; - } - } - } - - /* Disable HC interrupts */ - writel (OHCI_INTR_MIE, &ohci->regs->intrdisable); - - dbg("USB HC reset_hc usb-%s: ctrl = 0x%x ;", - ohci->slot_name, - readl (&ohci->regs->control)); - - /* Reset USB (needed by some controllers) */ - writel (0, &ohci->regs->control); - - /* HC Reset requires max 10 ms delay */ - writel (OHCI_HCR, &ohci->regs->cmdstatus); - while ((readl (&ohci->regs->cmdstatus) & OHCI_HCR) != 0) { - if (--timeout == 0) { - err("USB HC reset timed out!"); - return -1; - } - udelay (1); - } - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* Start an OHCI controller, set the BUS operational - * enable interrupts - * connect the virtual root hub */ - -int hc_start (ohci_t * ohci, struct device *parent_dev) -{ - __u32 mask; - unsigned int fminterval; - struct usb_device * usb_dev; - struct ohci_device * dev; - - ohci->disabled = 1; - - /* Tell the controller where the control and bulk lists are - * The lists are empty now. 
*/ - - writel (0, &ohci->regs->ed_controlhead); - writel (0, &ohci->regs->ed_bulkhead); - - writel (ohci->hcca_dma, &ohci->regs->hcca); /* a reset clears this */ - - fminterval = 0x2edf; - writel ((fminterval * 9) / 10, &ohci->regs->periodicstart); - fminterval |= ((((fminterval - 210) * 6) / 7) << 16); - writel (fminterval, &ohci->regs->fminterval); - writel (0x628, &ohci->regs->lsthresh); - - /* start controller operations */ - ohci->hc_control = OHCI_CONTROL_INIT | OHCI_USB_OPER; - ohci->disabled = 0; - writel (ohci->hc_control, &ohci->regs->control); - - /* Choose the interrupts we care about now, others later on demand */ - mask = OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_WDH | OHCI_INTR_SO; - writel (mask, &ohci->regs->intrenable); - writel (mask, &ohci->regs->intrstatus); - -#ifdef OHCI_USE_NPS - /* required for AMD-756 and some Mac platforms */ - writel ((roothub_a (ohci) | RH_A_NPS) & ~RH_A_PSM, - &ohci->regs->roothub.a); - writel (RH_HS_LPSC, &ohci->regs->roothub.status); -#endif /* OHCI_USE_NPS */ - - // POTPGT delay is bits 24-31, in 2 ms units. 
- mdelay ((roothub_a (ohci) >> 23) & 0x1fe); - - /* connect the virtual root hub */ - ohci->rh.devnum = 0; - usb_dev = usb_alloc_dev (NULL, ohci->bus); - if (!usb_dev) { - ohci->disabled = 1; - return -ENOMEM; - } - - dev = usb_to_ohci (usb_dev); - ohci->bus->root_hub = usb_dev; - usb_connect (usb_dev); - if (usb_register_root_hub (usb_dev, parent_dev) != 0) { - usb_free_dev (usb_dev); - ohci->disabled = 1; - return -ENODEV; - } - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -/* called only from interrupt handler */ - -static void check_timeouts (struct ohci *ohci) -{ - spin_lock (&usb_ed_lock); - while (!list_empty (&ohci->timeout_list)) { - struct urb *urb; - - urb = list_entry (ohci->timeout_list.next, struct urb, urb_list); - if (time_after (jiffies, urb->timeout)) - break; - - list_del_init (&urb->urb_list); - if (urb->status != -EINPROGRESS) - continue; - - urb->transfer_flags |= USB_TIMEOUT_KILLED | USB_ASYNC_UNLINK; - spin_unlock (&usb_ed_lock); - - // outside the interrupt handler (in a timer...) - // this reference would race interrupts - sohci_unlink_urb (urb); - - spin_lock (&usb_ed_lock); - } - spin_unlock (&usb_ed_lock); -} - - -/*-------------------------------------------------------------------------*/ - -/* an interrupt happens */ - -static void hc_interrupt (int irq, void * __ohci, struct pt_regs * r) -{ - ohci_t * ohci = __ohci; - struct ohci_regs * regs = ohci->regs; - int ints; - - if ((ohci->hcca->done_head != 0) && !(le32_to_cpup (&ohci->hcca->done_head) & 0x01)) { - ints = OHCI_INTR_WDH; - } else if ((ints = (readl (®s->intrstatus) & readl (®s->intrenable))) == 0) { - return; - } - - // dbg("Interrupt: %x frame: %x", ints, le16_to_cpu (ohci->hcca->frame_no)); - - if (ints & OHCI_INTR_UE) { - ohci->disabled++; - err ("OHCI Unrecoverable Error, controller usb-%s disabled", - ohci->slot_name); - // e.g. 
due to PCI Master/Target Abort - -#ifdef DEBUG - ohci_dump (ohci, 1); -#else - // FIXME: be optimistic, hope that bug won't repeat often. - // Make some non-interrupt context restart the controller. - // Count and limit the retries though; either hardware or - // software errors can go forever... -#endif - hc_reset (ohci); - } - - if (ints & OHCI_INTR_WDH) { - writel (OHCI_INTR_WDH, ®s->intrdisable); - dl_done_list (ohci, dl_reverse_done_list (ohci)); - writel (OHCI_INTR_WDH, ®s->intrenable); - } - - if (ints & OHCI_INTR_SO) { - dbg("USB Schedule overrun"); - writel (OHCI_INTR_SO, ®s->intrenable); - } - - // FIXME: this assumes SOF (1/ms) interrupts don't get lost... - if (ints & OHCI_INTR_SF) { - unsigned int frame = le16_to_cpu (ohci->hcca->frame_no) & 1; - writel (OHCI_INTR_SF, ®s->intrdisable); - if (ohci->ed_rm_list[!frame] != NULL) { - dl_del_list (ohci, !frame); - } - if (ohci->ed_rm_list[frame] != NULL) - writel (OHCI_INTR_SF, ®s->intrenable); - } - - if (!list_empty (&ohci->timeout_list)) { - check_timeouts (ohci); -// FIXME: enable SF as needed in a timer; -// don't make lots of 1ms interrupts -// On unloaded USB, think 4k ~= 4-5msec - if (!list_empty (&ohci->timeout_list)) - writel (OHCI_INTR_SF, ®s->intrenable); - } - - writel (ints, ®s->intrstatus); - writel (OHCI_INTR_MIE, ®s->intrenable); -} - -/*-------------------------------------------------------------------------*/ - -/* allocate OHCI */ - -static ohci_t * __devinit hc_alloc_ohci (struct pci_dev *dev, void * mem_base) -{ - ohci_t * ohci; - - ohci = (ohci_t *) kmalloc (sizeof *ohci, GFP_KERNEL); - if (!ohci) - return NULL; - - memset (ohci, 0, sizeof (ohci_t)); - - ohci->hcca = pci_alloc_consistent (dev, sizeof *ohci->hcca, - &ohci->hcca_dma); - if (!ohci->hcca) { - kfree (ohci); - return NULL; - } - memset (ohci->hcca, 0, sizeof (struct ohci_hcca)); - - ohci->disabled = 1; - ohci->sleeping = 0; - ohci->irq = -1; - ohci->regs = mem_base; - - ohci->ohci_dev = dev; -#ifdef CONFIG_PCI - 
pci_set_drvdata(dev, ohci); -#endif - - INIT_LIST_HEAD (&ohci->ohci_hcd_list); - list_add (&ohci->ohci_hcd_list, &ohci_hcd_list); - - INIT_LIST_HEAD (&ohci->timeout_list); - - ohci->bus = usb_alloc_bus (&sohci_device_operations); - if (!ohci->bus) { -#ifdef CONFIG_PCI - pci_set_drvdata (dev, NULL); -#endif - pci_free_consistent (ohci->ohci_dev, sizeof *ohci->hcca, - ohci->hcca, ohci->hcca_dma); - kfree (ohci); - return NULL; - } - ohci->bus->hcpriv = (void *) ohci; -#ifdef CONFIG_PCI - ohci->bus->bus_name = dev->slot_name; -#else - ohci->bus->bus_name = "ohci-hc"; -#endif - - return ohci; -} - - -/*-------------------------------------------------------------------------*/ - -/* De-allocate all resources.. */ - -static void hc_release_ohci (ohci_t * ohci) -{ - dbg ("USB HC release ohci usb-%s", ohci->slot_name); - - /* disconnect all devices */ - if (ohci->bus->root_hub) - usb_disconnect (&ohci->bus->root_hub); - - if (!ohci->disabled) - hc_reset (ohci); - - if (ohci->irq >= 0) { - free_irq (ohci->irq, ohci); - ohci->irq = -1; - } -#ifdef CONFIG_PCI - pci_set_drvdata(ohci->ohci_dev, NULL); -#endif - if (ohci->bus) { - if (ohci->bus->busnum) - usb_deregister_bus (ohci->bus); - usb_free_bus (ohci->bus); - } - - list_del (&ohci->ohci_hcd_list); - INIT_LIST_HEAD (&ohci->ohci_hcd_list); - - ohci_mem_cleanup (ohci); - - pci_free_consistent (ohci->ohci_dev, sizeof *ohci->hcca, - ohci->hcca, ohci->hcca_dma); - kfree (ohci); -} - -/*-------------------------------------------------------------------------*/ - -/* - * Host bus independent add one OHCI host controller. 
- */ -int -hc_add_ohci(struct pci_dev *dev, int irq, void *mem_base, unsigned long flags, - ohci_t **ohcip, const char *name, const char *slot_name) -{ - char buf[8], *bufp = buf; - ohci_t * ohci; - int ret; - -#ifndef __sparc__ - sprintf(buf, "%d", irq); -#else - bufp = __irq_itoa(irq); -#endif - printk(KERN_INFO __FILE__ ": USB OHCI at membase 0x%lx, IRQ %s\n", - (unsigned long) mem_base, bufp); - - ohci = hc_alloc_ohci (dev, mem_base); - if (!ohci) { - return -ENOMEM; - } - ohci->slot_name = slot_name; - if ((ret = ohci_mem_init (ohci)) < 0) { - hc_release_ohci (ohci); - return ret; - } - ohci->flags = flags; - if (ohci->flags & OHCI_QUIRK_AMD756) - printk (KERN_INFO __FILE__ ": AMD756 erratum 4 workaround\n"); - - if (hc_reset (ohci) < 0) { - hc_release_ohci (ohci); - return -ENODEV; - } - - /* FIXME this is a second HC reset; why?? */ - writel (ohci->hc_control = OHCI_USB_RESET, &ohci->regs->control); - wait_ms (10); - - usb_register_bus (ohci->bus); - - if (request_irq (irq, hc_interrupt, SA_SHIRQ, name, ohci) != 0) { - err ("request interrupt %s failed", bufp); - hc_release_ohci (ohci); - return -EBUSY; - } - ohci->irq = irq; - - *ohcip = ohci; - - return 0; -} - -/* - * Host bus independent remove one OHCI host controller. 
- */ -void hc_remove_ohci(ohci_t *ohci) -{ -#ifdef DEBUG - ohci_dump (ohci, 1); -#endif - - /* don't wake up sleeping controllers, or block in interrupt context */ - if ((ohci->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER || in_interrupt ()) { - dbg ("controller being disabled"); - ohci->disabled = 1; - } - - /* on return, USB will always be reset (if present) */ - if (ohci->disabled) - writel (ohci->hc_control = OHCI_USB_RESET, - &ohci->regs->control); - - hc_release_ohci (ohci); -} - -MODULE_AUTHOR( DRIVER_AUTHOR ); -MODULE_DESCRIPTION( DRIVER_DESC ); -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(hc_add_ohci); -EXPORT_SYMBOL(hc_remove_ohci); -EXPORT_SYMBOL(hc_start); -EXPORT_SYMBOL(hc_reset); -EXPORT_SYMBOL(dl_done_list); -EXPORT_SYMBOL(dl_reverse_done_list); -EXPORT_SYMBOL(usb_ed_lock); diff -Nru a/drivers/usb/host/usb-ohci.h b/drivers/usb/host/usb-ohci.h --- a/drivers/usb/host/usb-ohci.h Sat Jul 20 12:12:35 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,648 +0,0 @@ -/* - * URB OHCI HCD (Host Controller Driver) for USB. 
- * - * (C) Copyright 1999 Roman Weissgaerber - * (C) Copyright 2000-2001 David Brownell - * - * usb-ohci.h - */ - - -static int cc_to_error[16] = { - -/* mapping of the OHCI CC status to error codes */ - /* No Error */ 0, - /* CRC Error */ -EILSEQ, - /* Bit Stuff */ -EPROTO, - /* Data Togg */ -EILSEQ, - /* Stall */ -EPIPE, - /* DevNotResp */ -ETIMEDOUT, - /* PIDCheck */ -EPROTO, - /* UnExpPID */ -EPROTO, - /* DataOver */ -EOVERFLOW, - /* DataUnder */ -EREMOTEIO, - /* reservd */ -ETIMEDOUT, - /* reservd */ -ETIMEDOUT, - /* BufferOver */ -ECOMM, - /* BuffUnder */ -ENOSR, - /* Not Access */ -ETIMEDOUT, - /* Not Access */ -ETIMEDOUT -}; - -#include - -/* ED States */ - -#define ED_NEW 0x00 -#define ED_UNLINK 0x01 -#define ED_OPER 0x02 -#define ED_DEL 0x04 -#define ED_URB_DEL 0x08 - -/* usb_ohci_ed */ -struct ed { - __u32 hwINFO; - __u32 hwTailP; - __u32 hwHeadP; - __u32 hwNextED; - - struct ed * ed_prev; - __u8 int_period; - __u8 int_branch; - __u8 int_load; - __u8 int_interval; - __u8 state; - __u8 type; - __u16 last_iso; - struct ed * ed_rm_list; - - dma_addr_t dma; - __u32 unused[3]; -} __attribute((aligned(16))); -typedef struct ed ed_t; - - -/* TD info field */ -#define TD_CC 0xf0000000 -#define TD_CC_GET(td_p) ((td_p >>28) & 0x0f) -#define TD_CC_SET(td_p, cc) (td_p) = ((td_p) & 0x0fffffff) | (((cc) & 0x0f) << 28) -#define TD_EC 0x0C000000 -#define TD_T 0x03000000 -#define TD_T_DATA0 0x02000000 -#define TD_T_DATA1 0x03000000 -#define TD_T_TOGGLE 0x00000000 -#define TD_R 0x00040000 -#define TD_DI 0x00E00000 -#define TD_DI_SET(X) (((X) & 0x07)<< 21) -#define TD_DP 0x00180000 -#define TD_DP_SETUP 0x00000000 -#define TD_DP_IN 0x00100000 -#define TD_DP_OUT 0x00080000 - -#define TD_ISO 0x00010000 -#define TD_DEL 0x00020000 - -/* CC Codes */ -#define TD_CC_NOERROR 0x00 -#define TD_CC_CRC 0x01 -#define TD_CC_BITSTUFFING 0x02 -#define TD_CC_DATATOGGLEM 0x03 -#define TD_CC_STALL 0x04 -#define TD_DEVNOTRESP 0x05 -#define TD_PIDCHECKFAIL 0x06 -#define TD_UNEXPECTEDPID 0x07 
-#define TD_DATAOVERRUN 0x08 -#define TD_DATAUNDERRUN 0x09 -#define TD_BUFFEROVERRUN 0x0C -#define TD_BUFFERUNDERRUN 0x0D -#define TD_NOTACCESSED 0x0F - - -#define MAXPSW 1 - -struct td { - __u32 hwINFO; - __u32 hwCBP; /* Current Buffer Pointer */ - __u32 hwNextTD; /* Next TD Pointer */ - __u32 hwBE; /* Memory Buffer End Pointer */ - - __u16 hwPSW[MAXPSW]; - __u8 unused; - __u8 index; - struct ed * ed; - struct td * next_dl_td; - struct urb * urb; - - dma_addr_t td_dma; - dma_addr_t data_dma; - __u32 unused2[2]; -} __attribute((aligned(32))); /* normally 16, iso needs 32 */ -typedef struct td td_t; - -#define OHCI_ED_SKIP (1 << 14) - -/* - * The HCCA (Host Controller Communications Area) is a 256 byte - * structure defined in the OHCI spec. that the host controller is - * told the base address of. It must be 256-byte aligned. - */ - -#define NUM_INTS 32 /* part of the OHCI standard */ -struct ohci_hcca { - __u32 int_table[NUM_INTS]; /* Interrupt ED table */ - __u16 frame_no; /* current frame number */ - __u16 pad1; /* set to 0 on each frame_no change */ - __u32 done_head; /* info returned for an interrupt */ - u8 reserved_for_hc[116]; -} __attribute((aligned(256))); - - -/* - * Maximum number of root hub ports. - */ -#define MAX_ROOT_PORTS 15 /* maximum OHCI root hub ports */ - -/* - * This is the structure of the OHCI controller's memory mapped I/O - * region. This is Memory Mapped I/O. You must use the readl() and - * writel() macros defined in asm/io.h to access these!! 
- */ -struct ohci_regs { - /* control and status registers */ - __u32 revision; - __u32 control; - __u32 cmdstatus; - __u32 intrstatus; - __u32 intrenable; - __u32 intrdisable; - /* memory pointers */ - __u32 hcca; - __u32 ed_periodcurrent; - __u32 ed_controlhead; - __u32 ed_controlcurrent; - __u32 ed_bulkhead; - __u32 ed_bulkcurrent; - __u32 donehead; - /* frame counters */ - __u32 fminterval; - __u32 fmremaining; - __u32 fmnumber; - __u32 periodicstart; - __u32 lsthresh; - /* Root hub ports */ - struct ohci_roothub_regs { - __u32 a; - __u32 b; - __u32 status; - __u32 portstatus[MAX_ROOT_PORTS]; - } roothub; -} __attribute((aligned(32))); - - -/* OHCI CONTROL AND STATUS REGISTER MASKS */ - -/* - * HcControl (control) register masks - */ -#define OHCI_CTRL_CBSR (3 << 0) /* control/bulk service ratio */ -#define OHCI_CTRL_PLE (1 << 2) /* periodic list enable */ -#define OHCI_CTRL_IE (1 << 3) /* isochronous enable */ -#define OHCI_CTRL_CLE (1 << 4) /* control list enable */ -#define OHCI_CTRL_BLE (1 << 5) /* bulk list enable */ -#define OHCI_CTRL_HCFS (3 << 6) /* host controller functional state */ -#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */ -#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ -#define OHCI_CTRL_RWE (1 << 10) /* remote wakeup enable */ - -/* pre-shifted values for HCFS */ -# define OHCI_USB_RESET (0 << 6) -# define OHCI_USB_RESUME (1 << 6) -# define OHCI_USB_OPER (2 << 6) -# define OHCI_USB_SUSPEND (3 << 6) - -/* - * HcCommandStatus (cmdstatus) register masks - */ -#define OHCI_HCR (1 << 0) /* host controller reset */ -#define OHCI_CLF (1 << 1) /* control list filled */ -#define OHCI_BLF (1 << 2) /* bulk list filled */ -#define OHCI_OCR (1 << 3) /* ownership change request */ -#define OHCI_SOC (3 << 16) /* scheduling overrun count */ - -/* - * masks used with interrupt registers: - * HcInterruptStatus (intrstatus) - * HcInterruptEnable (intrenable) - * HcInterruptDisable (intrdisable) - */ -#define OHCI_INTR_SO (1 << 0) /* 
scheduling overrun */ -#define OHCI_INTR_WDH (1 << 1) /* writeback of done_head */ -#define OHCI_INTR_SF (1 << 2) /* start frame */ -#define OHCI_INTR_RD (1 << 3) /* resume detect */ -#define OHCI_INTR_UE (1 << 4) /* unrecoverable error */ -#define OHCI_INTR_FNO (1 << 5) /* frame number overflow */ -#define OHCI_INTR_RHSC (1 << 6) /* root hub status change */ -#define OHCI_INTR_OC (1 << 30) /* ownership change */ -#define OHCI_INTR_MIE (1 << 31) /* master interrupt enable */ - - - -/* Virtual Root HUB */ -struct virt_root_hub { - int devnum; /* Address of Root Hub endpoint */ - void * urb; - void * int_addr; - int send; - int interval; - struct timer_list rh_int_timer; -}; - - -/* USB HUB CONSTANTS (not OHCI-specific; see hub.h) */ - -/* destination of request */ -#define RH_INTERFACE 0x01 -#define RH_ENDPOINT 0x02 -#define RH_OTHER 0x03 - -#define RH_CLASS 0x20 -#define RH_VENDOR 0x40 - -/* Requests: bRequest << 8 | bmRequestType */ -#define RH_GET_STATUS 0x0080 -#define RH_CLEAR_FEATURE 0x0100 -#define RH_SET_FEATURE 0x0300 -#define RH_SET_ADDRESS 0x0500 -#define RH_GET_DESCRIPTOR 0x0680 -#define RH_SET_DESCRIPTOR 0x0700 -#define RH_GET_CONFIGURATION 0x0880 -#define RH_SET_CONFIGURATION 0x0900 -#define RH_GET_STATE 0x0280 -#define RH_GET_INTERFACE 0x0A80 -#define RH_SET_INTERFACE 0x0B00 -#define RH_SYNC_FRAME 0x0C80 -/* Our Vendor Specific Request */ -#define RH_SET_EP 0x2000 - - -/* Hub port features */ -#define RH_PORT_CONNECTION 0x00 -#define RH_PORT_ENABLE 0x01 -#define RH_PORT_SUSPEND 0x02 -#define RH_PORT_OVER_CURRENT 0x03 -#define RH_PORT_RESET 0x04 -#define RH_PORT_POWER 0x08 -#define RH_PORT_LOW_SPEED 0x09 - -#define RH_C_PORT_CONNECTION 0x10 -#define RH_C_PORT_ENABLE 0x11 -#define RH_C_PORT_SUSPEND 0x12 -#define RH_C_PORT_OVER_CURRENT 0x13 -#define RH_C_PORT_RESET 0x14 - -/* Hub features */ -#define RH_C_HUB_LOCAL_POWER 0x00 -#define RH_C_HUB_OVER_CURRENT 0x01 - -#define RH_DEVICE_REMOTE_WAKEUP 0x00 -#define RH_ENDPOINT_STALL 0x01 - -#define RH_ACK 0x01 
-#define RH_REQ_ERR -1 -#define RH_NACK 0x00 - - -/* OHCI ROOT HUB REGISTER MASKS */ - -/* roothub.portstatus [i] bits */ -#define RH_PS_CCS 0x00000001 /* current connect status */ -#define RH_PS_PES 0x00000002 /* port enable status*/ -#define RH_PS_PSS 0x00000004 /* port suspend status */ -#define RH_PS_POCI 0x00000008 /* port over current indicator */ -#define RH_PS_PRS 0x00000010 /* port reset status */ -#define RH_PS_PPS 0x00000100 /* port power status */ -#define RH_PS_LSDA 0x00000200 /* low speed device attached */ -#define RH_PS_CSC 0x00010000 /* connect status change */ -#define RH_PS_PESC 0x00020000 /* port enable status change */ -#define RH_PS_PSSC 0x00040000 /* port suspend status change */ -#define RH_PS_OCIC 0x00080000 /* over current indicator change */ -#define RH_PS_PRSC 0x00100000 /* port reset status change */ - -/* roothub.status bits */ -#define RH_HS_LPS 0x00000001 /* local power status */ -#define RH_HS_OCI 0x00000002 /* over current indicator */ -#define RH_HS_DRWE 0x00008000 /* device remote wakeup enable */ -#define RH_HS_LPSC 0x00010000 /* local power status change */ -#define RH_HS_OCIC 0x00020000 /* over current indicator change */ -#define RH_HS_CRWE 0x80000000 /* clear remote wakeup enable */ - -/* roothub.b masks */ -#define RH_B_DR 0x0000ffff /* device removable flags */ -#define RH_B_PPCM 0xffff0000 /* port power control mask */ - -/* roothub.a masks */ -#define RH_A_NDP (0xff << 0) /* number of downstream ports */ -#define RH_A_PSM (1 << 8) /* power switching mode */ -#define RH_A_NPS (1 << 9) /* no power switching */ -#define RH_A_DT (1 << 10) /* device type (mbz) */ -#define RH_A_OCPM (1 << 11) /* over current protection mode */ -#define RH_A_NOCP (1 << 12) /* no over current protection */ -#define RH_A_POTPGT (0xff << 24) /* power on to power good time */ - -/* urb */ -typedef struct -{ - ed_t * ed; - __u16 length; // number of tds associated with this request - __u16 td_cnt; // number of tds already serviced - int state; - 
wait_queue_head_t * wait; - td_t * td[0]; // list pointer to all corresponding TDs associated with this request - -} urb_priv_t; -#define URB_DEL 1 - - -/* Hash struct used for TD/ED hashing */ -struct hash_t { - void *virt; - dma_addr_t dma; - struct hash_t *next; // chaining for collision cases -}; - -/* List of TD/ED hash entries */ -struct hash_list_t { - struct hash_t *head; - struct hash_t *tail; -}; - -#define TD_HASH_SIZE 64 /* power'o'two */ -#define ED_HASH_SIZE 64 /* power'o'two */ - -#define TD_HASH_FUNC(td_dma) ((td_dma ^ (td_dma >> 5)) % TD_HASH_SIZE) -#define ED_HASH_FUNC(ed_dma) ((ed_dma ^ (ed_dma >> 5)) % ED_HASH_SIZE) - - -/* - * This is the full ohci controller description - * - * Note how the "proper" USB information is just - * a subset of what the full implementation needs. (Linus) - */ - - -typedef struct ohci { - struct ohci_hcca *hcca; /* hcca */ - dma_addr_t hcca_dma; - - int irq; - int disabled; /* e.g. got a UE, we're hung */ - int sleeping; - atomic_t resume_count; /* defending against multiple resumes */ - unsigned long flags; /* for HC bugs */ -#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */ - - struct ohci_regs * regs; /* OHCI controller's memory */ - struct list_head ohci_hcd_list; /* list of all ohci_hcd */ - - struct ohci * next; // chain of ohci device contexts - struct list_head timeout_list; - // struct list_head urb_list; // list of all pending urbs - // spinlock_t urb_list_lock; // lock to keep consistency - - int ohci_int_load[32]; /* load of the 32 Interrupt Chains (for load balancing)*/ - ed_t * ed_rm_list[2]; /* lists of all endpoints to be removed */ - ed_t * ed_bulktail; /* last endpoint of bulk list */ - ed_t * ed_controltail; /* last endpoint of control list */ - ed_t * ed_isotail; /* last endpoint of iso list */ - int intrstatus; - __u32 hc_control; /* copy of the hc control reg */ - struct usb_bus * bus; - struct usb_device * dev[128]; - struct virt_root_hub rh; - - /* PCI device handle, settings, ... 
*/ - struct pci_dev *ohci_dev; - const char *slot_name; - u8 pci_latency; - struct pci_pool *td_cache; - struct pci_pool *dev_cache; - struct hash_list_t td_hash[TD_HASH_SIZE]; - struct hash_list_t ed_hash[ED_HASH_SIZE]; - -} ohci_t; - -#define NUM_EDS 32 /* num of preallocated endpoint descriptors */ - -struct ohci_device { - ed_t ed[NUM_EDS]; - dma_addr_t dma; - int ed_cnt; - wait_queue_head_t * wait; -}; - -// #define ohci_to_usb(ohci) ((ohci)->usb) -#define usb_to_ohci(usb) ((struct ohci_device *)(usb)->hcpriv) - -/* For initializing controller (mask in an HCFS mode too) */ -#define OHCI_CONTROL_INIT \ - (OHCI_CTRL_CBSR & 0x3) | OHCI_CTRL_IE | OHCI_CTRL_PLE - -/* hcd */ -/* endpoint */ -static int ep_link(ohci_t * ohci, ed_t * ed); -static int ep_unlink(ohci_t * ohci, ed_t * ed); -static ed_t * ep_add_ed(struct usb_device * usb_dev, unsigned int pipe, int interval, int load, int mem_flags); -static void ep_rm_ed(struct usb_device * usb_dev, ed_t * ed); -/* td */ -static void td_fill(ohci_t * ohci, unsigned int info, dma_addr_t data, int len, struct urb * urb, int index); -static void td_submit_urb(struct urb * urb); -/* root hub */ -static int rh_submit_urb(struct urb * urb); -static int rh_unlink_urb(struct urb * urb); -static int rh_init_int_timer(struct urb * urb); - -/*-------------------------------------------------------------------------*/ - -#define ALLOC_FLAGS (in_interrupt () ? 
GFP_ATOMIC : GFP_KERNEL) - -#ifdef DEBUG -# define OHCI_MEM_FLAGS SLAB_POISON -#else -# define OHCI_MEM_FLAGS 0 -#endif - - -/* Recover a TD/ED using its collision chain */ -static void * -dma_to_ed_td (struct hash_list_t * entry, dma_addr_t dma) -{ - struct hash_t * scan = entry->head; - while (scan && scan->dma != dma) - scan = scan->next; - if (!scan) - BUG(); - return scan->virt; -} - -static struct ed * -dma_to_ed (struct ohci * hc, dma_addr_t ed_dma) -{ - return (struct ed *) dma_to_ed_td(&(hc->ed_hash[ED_HASH_FUNC(ed_dma)]), - ed_dma); -} - -static struct td * -dma_to_td (struct ohci * hc, dma_addr_t td_dma) -{ - return (struct td *) dma_to_ed_td(&(hc->td_hash[TD_HASH_FUNC(td_dma)]), - td_dma); -} - -/* Add a hash entry for a TD/ED; return true on success */ -static int -hash_add_ed_td(struct hash_list_t * entry, void * virt, dma_addr_t dma) -{ - struct hash_t * scan; - - scan = (struct hash_t *)kmalloc(sizeof(struct hash_t), ALLOC_FLAGS); - if (!scan) - return 0; - - if (!entry->tail) { - entry->head = entry->tail = scan; - } else { - entry->tail->next = scan; - entry->tail = scan; - } - - scan->virt = virt; - scan->dma = dma; - scan->next = NULL; - return 1; -} - -static int -hash_add_ed (struct ohci * hc, struct ed * ed) -{ - return hash_add_ed_td (&(hc->ed_hash[ED_HASH_FUNC(ed->dma)]), - ed, ed->dma); -} - -static int -hash_add_td (struct ohci * hc, struct td * td) -{ - return hash_add_ed_td (&(hc->td_hash[TD_HASH_FUNC(td->td_dma)]), - td, td->td_dma); -} - - -static void -hash_free_ed_td (struct hash_list_t * entry, void * virt) -{ - struct hash_t *scan, *prev; - scan = prev = entry->head; - - // Find and unlink hash entry - while (scan && scan->virt != virt) { - prev = scan; - scan = scan->next; - } - if (scan) { - if (scan == entry->head) { - if (entry->head == entry->tail) - entry->head = entry->tail = NULL; - else - entry->head = scan->next; - } else if (scan == entry->tail) { - entry->tail = prev; - prev->next = NULL; - } else - prev->next = 
scan->next; - kfree(scan); - } -} - -static void -hash_free_ed (struct ohci * hc, struct ed * ed) -{ - hash_free_ed_td (&(hc->ed_hash[ED_HASH_FUNC(ed->dma)]), ed); -} - -static void -hash_free_td (struct ohci * hc, struct td * td) -{ - hash_free_ed_td (&(hc->td_hash[TD_HASH_FUNC(td->td_dma)]), td); -} - - -static int ohci_mem_init (struct ohci *ohci) -{ - ohci->td_cache = pci_pool_create ("ohci_td", ohci->ohci_dev, - sizeof (struct td), - 32 /* byte alignment */, - 0 /* no page-crossing issues */, - GFP_KERNEL | OHCI_MEM_FLAGS); - if (!ohci->td_cache) - return -ENOMEM; - ohci->dev_cache = pci_pool_create ("ohci_dev", ohci->ohci_dev, - sizeof (struct ohci_device), - 16 /* byte alignment */, - 0 /* no page-crossing issues */, - GFP_KERNEL | OHCI_MEM_FLAGS); - if (!ohci->dev_cache) - return -ENOMEM; - return 0; -} - -static void ohci_mem_cleanup (struct ohci *ohci) -{ - if (ohci->td_cache) { - pci_pool_destroy (ohci->td_cache); - ohci->td_cache = 0; - } - if (ohci->dev_cache) { - pci_pool_destroy (ohci->dev_cache); - ohci->dev_cache = 0; - } -} - -/* TDs ... */ -static struct td * -td_alloc (struct ohci *hc, int mem_flags) -{ - dma_addr_t dma; - struct td *td; - - td = pci_pool_alloc (hc->td_cache, mem_flags, &dma); - if (td) { - td->td_dma = dma; - - /* hash it for later reverse mapping */ - if (!hash_add_td (hc, td)) { - pci_pool_free (hc->td_cache, td, dma); - return NULL; - } - } - return td; -} - -static inline void -td_free (struct ohci *hc, struct td *td) -{ - hash_free_td (hc, td); - pci_pool_free (hc->td_cache, td, td->td_dma); -} - - -/* DEV + EDs ... 
only the EDs need to be consistent */ -static struct ohci_device * -dev_alloc (struct ohci *hc, int mem_flags) -{ - dma_addr_t dma; - struct ohci_device *dev; - int i, offset; - - dev = pci_pool_alloc (hc->dev_cache, mem_flags, &dma); - if (dev) { - memset (dev, 0, sizeof (*dev)); - dev->dma = dma; - offset = ((char *)&dev->ed) - ((char *)dev); - for (i = 0; i < NUM_EDS; i++, offset += sizeof dev->ed [0]) - dev->ed [i].dma = dma + offset; - /* add to hashtable if used */ - } - return dev; -} - -static inline void -dev_free (struct ohci *hc, struct ohci_device *dev) -{ - pci_pool_free (hc->dev_cache, dev, dev->dma); -} - -extern spinlock_t usb_ed_lock; -extern void dl_done_list (ohci_t * ohci, td_t * td_list); -extern td_t * dl_reverse_done_list (ohci_t * ohci); - - diff -Nru a/drivers/usb/image/hpusbscsi.c b/drivers/usb/image/hpusbscsi.c --- a/drivers/usb/image/hpusbscsi.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/image/hpusbscsi.c Sat Jul 20 12:12:35 2002 @@ -164,10 +164,10 @@ static struct usb_driver hpusbscsi_usb_driver = { - name:"hpusbscsi", - probe:hpusbscsi_usb_probe, - disconnect:hpusbscsi_usb_disconnect, - id_table:hpusbscsi_usb_ids, + .name ="hpusbscsi", + .probe =hpusbscsi_usb_probe, + .disconnect =hpusbscsi_usb_disconnect, + .id_table =hpusbscsi_usb_ids, }; /* module initialisation */ diff -Nru a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c --- a/drivers/usb/image/mdc800.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/image/mdc800.c Sat Jul 20 12:12:34 2002 @@ -923,11 +923,11 @@ /* File Operations of this drivers */ static struct file_operations mdc800_device_ops = { - owner: THIS_MODULE, - read: mdc800_device_read, - write: mdc800_device_write, - open: mdc800_device_open, - release: mdc800_device_release, + .owner = THIS_MODULE, + .read = mdc800_device_read, + .write = mdc800_device_write, + .open = mdc800_device_open, + .release = mdc800_device_release, }; @@ -943,11 +943,11 @@ */ static struct usb_driver mdc800_usb_driver = { - owner: 
THIS_MODULE, - name: "mdc800", - probe: mdc800_usb_probe, - disconnect: mdc800_usb_disconnect, - id_table: mdc800_table + .owner = THIS_MODULE, + .name = "mdc800", + .probe = mdc800_usb_probe, + .disconnect = mdc800_usb_disconnect, + .id_table = mdc800_table }; diff -Nru a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c --- a/drivers/usb/image/microtek.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/image/microtek.c Sat Jul 20 12:12:35 2002 @@ -161,10 +161,10 @@ static struct usb_device_id mts_usb_ids []; static struct usb_driver mts_usb_driver = { - name: "microtekX6", - probe: mts_usb_probe, - disconnect: mts_usb_disconnect, - id_table: mts_usb_ids, + .name = "microtekX6", + .probe = mts_usb_probe, + .disconnect = mts_usb_disconnect, + .id_table = mts_usb_ids, }; @@ -743,22 +743,22 @@ static Scsi_Host_Template mts_scsi_host_template = { - name: "microtekX6", - detect: mts_scsi_detect, - release: mts_scsi_release, - queuecommand: mts_scsi_queuecommand, - - eh_abort_handler: mts_scsi_abort, - eh_host_reset_handler: mts_scsi_host_reset, - - sg_tablesize: SG_ALL, - can_queue: 1, - this_id: -1, - cmd_per_lun: 1, - present: 0, - unchecked_isa_dma: FALSE, - use_clustering: TRUE, - emulated: TRUE + .name = "microtekX6", + .detect = mts_scsi_detect, + .release = mts_scsi_release, + .queuecommand = mts_scsi_queuecommand, + + .eh_abort_handler = mts_scsi_abort, + .eh_host_reset_handler =mts_scsi_host_reset, + + .sg_tablesize = SG_ALL, + .can_queue = 1, + .this_id = -1, + .cmd_per_lun = 1, + .present = 0, + .unchecked_isa_dma = FALSE, + .use_clustering = TRUE, + .emulated = TRUE }; diff -Nru a/drivers/usb/image/scanner.c b/drivers/usb/image/scanner.c --- a/drivers/usb/image/scanner.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/image/scanner.c Sat Jul 20 12:12:35 2002 @@ -811,11 +811,11 @@ static struct file_operations usb_scanner_fops = { - read: read_scanner, - write: write_scanner, - ioctl: ioctl_scanner, - open: open_scanner, - release: close_scanner, + .read = 
read_scanner, + .write = write_scanner, + .ioctl = ioctl_scanner, + .open = open_scanner, + .release = close_scanner, }; static void * @@ -1116,10 +1116,10 @@ static struct usb_driver scanner_driver = { - name: "usbscanner", - probe: probe_scanner, - disconnect: disconnect_scanner, - id_table: NULL, /* This would be scanner_device_ids, but we + .name = "usbscanner", + .probe = probe_scanner, + .disconnect = disconnect_scanner, + .id_table = NULL, /* This would be scanner_device_ids, but we need to check every USB device, in case we match a user defined vendor/product ID. */ }; diff -Nru a/drivers/usb/input/aiptek.c b/drivers/usb/input/aiptek.c --- a/drivers/usb/input/aiptek.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/aiptek.c Sat Jul 20 12:12:35 2002 @@ -313,10 +313,10 @@ } static struct usb_driver aiptek_driver = { - name:"aiptek", - probe:aiptek_probe, - disconnect:aiptek_disconnect, - id_table:aiptek_ids, + .name ="aiptek", + .probe =aiptek_probe, + .disconnect =aiptek_disconnect, + .id_table =aiptek_ids, }; static int __init diff -Nru a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c --- a/drivers/usb/input/hid-core.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/hid-core.c Sat Jul 20 12:12:35 2002 @@ -1556,10 +1556,10 @@ MODULE_DEVICE_TABLE (usb, hid_usb_ids); static struct usb_driver hid_driver = { - name: "hid", - probe: hid_probe, - disconnect: hid_disconnect, - id_table: hid_usb_ids, + .name = "hid", + .probe = hid_probe, + .disconnect = hid_disconnect, + .id_table = hid_usb_ids, }; static int __init hid_init(void) diff -Nru a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c --- a/drivers/usb/input/hiddev.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/hiddev.c Sat Jul 20 12:12:35 2002 @@ -657,14 +657,14 @@ } static struct file_operations hiddev_fops = { - owner: THIS_MODULE, - read: hiddev_read, - write: hiddev_write, - poll: hiddev_poll, - open: hiddev_open, - release: hiddev_release, - ioctl: hiddev_ioctl, - fasync: 
hiddev_fasync, + .owner = THIS_MODULE, + .read = hiddev_read, + .write = hiddev_write, + .poll = hiddev_poll, + .open = hiddev_open, + .release = hiddev_release, + .ioctl = hiddev_ioctl, + .fasync = hiddev_fasync, }; /* @@ -759,8 +759,8 @@ static /* const */ struct usb_driver hiddev_driver = { - name: "hiddev", - probe: hiddev_usbd_probe, + .name = "hiddev", + .probe = hiddev_usbd_probe, }; int __init hiddev_init(void) diff -Nru a/drivers/usb/input/powermate.c b/drivers/usb/input/powermate.c --- a/drivers/usb/input/powermate.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/powermate.c Sat Jul 20 12:12:35 2002 @@ -334,10 +334,10 @@ MODULE_DEVICE_TABLE (usb, powermate_devices); static struct usb_driver powermate_driver = { - name: "powermate", - probe: powermate_probe, - disconnect: powermate_disconnect, - id_table: powermate_devices, + .name = "powermate", + .probe = powermate_probe, + .disconnect = powermate_disconnect, + .id_table = powermate_devices, }; int powermate_init(void) diff -Nru a/drivers/usb/input/usbkbd.c b/drivers/usb/input/usbkbd.c --- a/drivers/usb/input/usbkbd.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/usbkbd.c Sat Jul 20 12:12:35 2002 @@ -287,10 +287,10 @@ MODULE_DEVICE_TABLE (usb, usb_kbd_id_table); static struct usb_driver usb_kbd_driver = { - name: "keyboard", - probe: usb_kbd_probe, - disconnect: usb_kbd_disconnect, - id_table: usb_kbd_id_table, + .name = "keyboard", + .probe = usb_kbd_probe, + .disconnect = usb_kbd_disconnect, + .id_table = usb_kbd_id_table, }; static int __init usb_kbd_init(void) diff -Nru a/drivers/usb/input/usbmouse.c b/drivers/usb/input/usbmouse.c --- a/drivers/usb/input/usbmouse.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/usbmouse.c Sat Jul 20 12:12:35 2002 @@ -195,10 +195,10 @@ MODULE_DEVICE_TABLE (usb, usb_mouse_id_table); static struct usb_driver usb_mouse_driver = { - name: "usb_mouse", - probe: usb_mouse_probe, - disconnect: usb_mouse_disconnect, - id_table: usb_mouse_id_table, + .name = 
"usb_mouse", + .probe = usb_mouse_probe, + .disconnect = usb_mouse_disconnect, + .id_table = usb_mouse_id_table, }; static int __init usb_mouse_init(void) diff -Nru a/drivers/usb/input/wacom.c b/drivers/usb/input/wacom.c --- a/drivers/usb/input/wacom.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/input/wacom.c Sat Jul 20 12:12:34 2002 @@ -428,10 +428,10 @@ } static struct usb_driver wacom_driver = { - name: "wacom", - probe: wacom_probe, - disconnect: wacom_disconnect, - id_table: wacom_ids, + .name = "wacom", + .probe = wacom_probe, + .disconnect = wacom_disconnect, + .id_table = wacom_ids, }; static int __init wacom_init(void) diff -Nru a/drivers/usb/input/xpad.c b/drivers/usb/input/xpad.c --- a/drivers/usb/input/xpad.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/input/xpad.c Sat Jul 20 12:12:35 2002 @@ -304,10 +304,10 @@ } static struct usb_driver xpad_driver = { - name: "xpad", - probe: xpad_probe, - disconnect: xpad_disconnect, - id_table: xpad_table, + .name = "xpad", + .probe = xpad_probe, + .disconnect = xpad_disconnect, + .id_table = xpad_table, }; static int __init usb_xpad_init(void) diff -Nru a/drivers/usb/media/dabusb.c b/drivers/usb/media/dabusb.c --- a/drivers/usb/media/dabusb.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/dabusb.c Sat Jul 20 12:12:35 2002 @@ -704,12 +704,12 @@ static struct file_operations dabusb_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - read: dabusb_read, - ioctl: dabusb_ioctl, - open: dabusb_open, - release: dabusb_release, + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = dabusb_read, + .ioctl = dabusb_ioctl, + .open = dabusb_open, + .release = dabusb_release, }; static int dabusb_find_struct (void) @@ -806,10 +806,10 @@ static struct usb_driver dabusb_driver = { - name: "dabusb", - probe: dabusb_probe, - disconnect: dabusb_disconnect, - id_table: dabusb_ids, + .name = "dabusb", + .probe = dabusb_probe, + .disconnect = dabusb_disconnect, + .id_table = dabusb_ids, }; /* 
--------------------------------------------------------------------- */ diff -Nru a/drivers/usb/media/dsbr100.c b/drivers/usb/media/dsbr100.c --- a/drivers/usb/media/dsbr100.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/dsbr100.c Sat Jul 20 12:12:35 2002 @@ -100,19 +100,19 @@ static struct file_operations usb_dsbr100_fops = { - owner: THIS_MODULE, - open: usb_dsbr100_open, - release: usb_dsbr100_close, - ioctl: usb_dsbr100_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = usb_dsbr100_open, + .release = usb_dsbr100_close, + .ioctl = usb_dsbr100_ioctl, + .llseek = no_llseek, }; static struct video_device usb_dsbr100_radio= { - owner: THIS_MODULE, - name: "D-Link DSB R-100 USB radio", - type: VID_TYPE_TUNER, - hardware: VID_HARDWARE_AZTECH, - fops: &usb_dsbr100_fops, + .owner = THIS_MODULE, + .name = "D-Link DSB R-100 USB radio", + .type = VID_TYPE_TUNER, + .hardware = VID_HARDWARE_AZTECH, + .fops = &usb_dsbr100_fops, }; static int users = 0; @@ -125,10 +125,10 @@ MODULE_DEVICE_TABLE (usb, usb_dsbr100_table); static struct usb_driver usb_dsbr100_driver = { - name: "dsbr100", - probe: usb_dsbr100_probe, - disconnect: usb_dsbr100_disconnect, - id_table: usb_dsbr100_table, + .name = "dsbr100", + .probe = usb_dsbr100_probe, + .disconnect = usb_dsbr100_disconnect, + .id_table = usb_dsbr100_table, }; diff -Nru a/drivers/usb/media/ov511.c b/drivers/usb/media/ov511.c --- a/drivers/usb/media/ov511.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/ov511.c Sat Jul 20 12:12:35 2002 @@ -9,7 +9,6 @@ * OV7620 fixes by Charl P. 
Botha * Changes by Claudio Matsuoka * Original SAA7111A code by Dave Perks - * Kernel I2C interface adapted from nt1003 driver * URB error messages from pwc driver by Nemosoft * generic_ioctl() code from videodev.c by Gerd Knorr and Alan Cox * Memory management (rvmalloc) code from bttv driver, by Gerd Knorr and others @@ -61,7 +60,7 @@ /* * Version Information */ -#define DRIVER_VERSION "v1.60a for Linux 2.5" +#define DRIVER_VERSION "v1.61 for Linux 2.5" #define EMAIL "mmcclell@bigfoot.com" #define DRIVER_AUTHOR "Mark McClelland & Bret Wallach \ & Orion Sky Lawlor & Kevin Moore & Charl P. Botha \ @@ -72,7 +71,6 @@ #define ENABLE_Y_QUANTABLE 1 #define ENABLE_UV_QUANTABLE 1 -/* If you change this, you must also change the MODULE_PARM definition */ #define OV511_MAX_UNIT_VIDEO 16 /* Pixel count * bytes per YUV420 pixel (1.5) */ @@ -127,7 +125,6 @@ static int fastset; static int force_palette; -static int tuner = -1; static int backlight; static int unit_video[OV511_MAX_UNIT_VIDEO]; static int remove_zeros; @@ -194,11 +191,9 @@ MODULE_PARM_DESC(fastset, "Allows picture settings to take effect immediately"); MODULE_PARM(force_palette, "i"); MODULE_PARM_DESC(force_palette, "Force the palette to a specific value"); -MODULE_PARM(tuner, "i"); -MODULE_PARM_DESC(tuner, "Set tuner type, if not autodetected"); MODULE_PARM(backlight, "i"); MODULE_PARM_DESC(backlight, "For objects that are lit from behind"); -MODULE_PARM(unit_video, "0-16i"); +MODULE_PARM(unit_video, "1-" __MODULE_STRING(OV511_MAX_UNIT_VIDEO) "i"); MODULE_PARM_DESC(unit_video, "Force use of specific minor number(s). 
0 is not allowed."); MODULE_PARM(remove_zeros, "i"); @@ -337,12 +332,14 @@ **********************************************************************/ static void ov51x_clear_snapshot(struct usb_ov511 *); -static int ov51x_check_snapshot(struct usb_ov511 *); -static inline int sensor_get_picture(struct usb_ov511 *, +static inline int sensor_get_picture(struct usb_ov511 *, struct video_picture *); +#if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) static int sensor_get_exposure(struct usb_ov511 *, unsigned char *); static int ov51x_control_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +static int ov51x_check_snapshot(struct usb_ov511 *); +#endif /********************************************************************** * Memory management @@ -351,7 +348,7 @@ /* Here we want the physical address of the memory. * This is used when initializing the contents of the area. */ -static inline unsigned long +static inline unsigned long kvirt_to_pa(unsigned long adr) { unsigned long kva, ret; @@ -384,7 +381,7 @@ return mem; } -static void +static void rvfree(void *mem, unsigned long size) { unsigned long adr; @@ -412,13 +409,13 @@ extern struct proc_dir_entry *video_proc_entry; static struct file_operations ov511_control_fops = { - ioctl: ov51x_control_ioctl, + .ioctl = ov51x_control_ioctl, }; #define YES_NO(x) ((x) ? "yes" : "no") /* /proc/video/ov511//info */ -static int +static int ov511_read_proc_info(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -490,13 +487,13 @@ * When the camera's button is pressed, the output of this will change from a * 0 to a 1 (ASCII). It will retain this value until it is read, after which * it will reset to zero. - * + * * SECURITY NOTE: Since reading this file can change the state of the snapshot * status, it is important for applications that open it to keep it locked * against access by other processes, using flock() or a similar mechanism. No * locking is provided by this driver. 
*/ -static int +static int ov511_read_proc_button(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -528,7 +525,7 @@ return len; } -static void +static void create_proc_ov511_cam(struct usb_ov511 *ov) { char dirname[10]; @@ -579,11 +576,11 @@ unlock_kernel(); } -static void +static void destroy_proc_ov511_cam(struct usb_ov511 *ov) { char dirname[10]; - + if (!ov || !ov->proc_devdir) return; @@ -616,7 +613,7 @@ ov->proc_devdir = NULL; } -static void +static void proc_ov511_create(void) { /* No current standard here. Alan prefers /proc/video/ as it keeps @@ -637,7 +634,7 @@ err("Unable to create /proc/video/ov511"); } -static void +static void proc_ov511_destroy(void) { PDEBUG(3, "removing /proc/video/ov511"); @@ -656,7 +653,7 @@ **********************************************************************/ /* Write an OV51x register */ -static int +static int reg_w(struct usb_ov511 *ov, unsigned char reg, unsigned char value) { int rc; @@ -669,7 +666,7 @@ usb_sndctrlpipe(ov->dev, 0), (ov->bclass == BCL_OV518)?1:2 /* REG_IO */, USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0, (__u16)reg, &ov->cbuf[0], 1, HZ); + 0, (__u16)reg, &ov->cbuf[0], 1, HZ); up(&ov->cbuf_lock); if (rc < 0) @@ -680,7 +677,7 @@ /* Read from an OV51x register */ /* returns: negative is error, pos or zero is data */ -static int +static int reg_r(struct usb_ov511 *ov, unsigned char reg) { int rc; @@ -691,13 +688,13 @@ (ov->bclass == BCL_OV518)?1:3 /* REG_IO */, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, (__u16)reg, &ov->cbuf[0], 1, HZ); - - PDEBUG(5, "0x%02X:0x%02X", reg, ov->cbuf[0]); - - if (rc < 0) + + if (rc < 0) { err("reg read: error %d: %s", rc, symbolic(urb_errlist, rc)); - else - rc = ov->cbuf[0]; + } else { + rc = ov->cbuf[0]; + PDEBUG(5, "0x%02X:0x%02X", reg, ov->cbuf[0]); + } up(&ov->cbuf_lock); @@ -707,10 +704,10 @@ /* * Writes bits at positions specified by mask to an OV51x reg. Bits that are in * the same position as 1's in "mask" are cleared and set to "value". 
Bits - * that are in the same position as 0's in "mask" are preserved, regardless + * that are in the same position as 0's in "mask" are preserved, regardless * of their respective state in "value". */ -static int +static int reg_w_mask(struct usb_ov511 *ov, unsigned char reg, unsigned char value, @@ -735,7 +732,7 @@ * Writes multiple (n) byte value to a single register. Only valid with certain * registers (0x30 and 0xc4 - 0xce). */ -static int +static int ov518_reg_w32(struct usb_ov511 *ov, unsigned char reg, u32 val, int n) { int rc; @@ -760,7 +757,7 @@ return rc; } -static int +static int ov511_upload_quan_tables(struct usb_ov511 *ov) { unsigned char *pYTable = yQuanTable511; @@ -770,10 +767,8 @@ PDEBUG(4, "Uploading quantization tables"); - for (i = 0; i < OV511_QUANTABLESIZE / 2; i++) - { - if (ENABLE_Y_QUANTABLE) - { + for (i = 0; i < OV511_QUANTABLESIZE / 2; i++) { + if (ENABLE_Y_QUANTABLE) { val0 = *pYTable++; val1 = *pYTable++; val0 &= 0x0f; @@ -784,8 +779,7 @@ return rc; } - if (ENABLE_UV_QUANTABLE) - { + if (ENABLE_UV_QUANTABLE) { val0 = *pUVTable++; val1 = *pUVTable++; val0 &= 0x0f; @@ -803,7 +797,7 @@ } /* OV518 quantization tables are 8x4 (instead of 8x8) */ -static int +static int ov518_upload_quan_tables(struct usb_ov511 *ov) { unsigned char *pYTable = yQuanTable518; @@ -813,10 +807,8 @@ PDEBUG(4, "Uploading quantization tables"); - for (i = 0; i < OV518_QUANTABLESIZE / 2; i++) - { - if (ENABLE_Y_QUANTABLE) - { + for (i = 0; i < OV518_QUANTABLESIZE / 2; i++) { + if (ENABLE_Y_QUANTABLE) { val0 = *pYTable++; val1 = *pYTable++; val0 &= 0x0f; @@ -827,8 +819,7 @@ return rc; } - if (ENABLE_UV_QUANTABLE) - { + if (ENABLE_UV_QUANTABLE) { val0 = *pUVTable++; val1 = *pUVTable++; val0 &= 0x0f; @@ -845,16 +836,16 @@ return 0; } -static int +static int ov51x_reset(struct usb_ov511 *ov, unsigned char reset_type) { int rc; - + /* Setting bit 0 not allowed on 518/518Plus */ if (ov->bclass == BCL_OV518) reset_type &= 0xfe; - PDEBUG(4, "Reset: type=0x%X", 
reset_type); + PDEBUG(4, "Reset: type=0x%02X", reset_type); rc = reg_w(ov, R51x_SYS_RESET, reset_type); rc = reg_w(ov, R51x_SYS_RESET, 0); @@ -876,7 +867,7 @@ * This is normally only called from i2c_w(). Note that this function * always succeeds regardless of whether the sensor is present and working. */ -static int +static int ov518_i2c_write_internal(struct usb_ov511 *ov, unsigned char reg, unsigned char value) @@ -901,7 +892,7 @@ } /* NOTE: Do not call this function directly! */ -static int +static int ov511_i2c_write_internal(struct usb_ov511 *ov, unsigned char reg, unsigned char value) @@ -917,7 +908,7 @@ if (rc < 0) return rc; /* Write "value" to I2C data port of OV511 */ - rc = reg_w(ov, R51x_I2C_DATA, value); + rc = reg_w(ov, R51x_I2C_DATA, value); if (rc < 0) return rc; /* Initiate 3-byte write cycle */ @@ -931,7 +922,7 @@ if ((rc&2) == 0) /* Ack? */ break; #if 0 - /* I2C abort */ + /* I2C abort */ reg_w(ov, R511_I2C_CTL, 0x10); #endif if (--retries < 0) { @@ -948,7 +939,7 @@ * This is normally only called from i2c_r(). Note that this function * always succeeds regardless of whether the sensor is present and working. */ -static int +static int ov518_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg) { int rc, value; @@ -974,7 +965,7 @@ /* NOTE: Do not call this function directly! * returns: negative is error, pos or zero is data */ -static int +static int ov511_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg) { int rc, value, retries; @@ -996,7 +987,7 @@ if ((rc&2) == 0) /* Ack? */ break; - /* I2C abort */ + /* I2C abort */ reg_w(ov, R511_I2C_CTL, 0x10); if (--retries < 0) { @@ -1018,7 +1009,7 @@ if ((rc&2) == 0) /* Ack? 
*/ break; - /* I2C abort */ + /* I2C abort */ rc = reg_w(ov, R511_I2C_CTL, 0x10); if (rc < 0) return rc; @@ -1031,17 +1022,17 @@ value = reg_r(ov, R51x_I2C_DATA); PDEBUG(5, "0x%02X:0x%02X", reg, value); - + /* This is needed to make i2c_w() work */ rc = reg_w(ov, R511_I2C_CTL, 0x05); if (rc < 0) return rc; - + return value; } /* returns: negative is error, pos or zero is data */ -static int +static int i2c_r(struct usb_ov511 *ov, unsigned char reg) { int rc; @@ -1058,7 +1049,7 @@ return rc; } -static int +static int i2c_w(struct usb_ov511 *ov, unsigned char reg, unsigned char value) { int rc; @@ -1076,7 +1067,7 @@ } /* Do not call this function directly! */ -static int +static int ov51x_i2c_write_mask_internal(struct usb_ov511 *ov, unsigned char reg, unsigned char value, @@ -1109,10 +1100,10 @@ /* Writes bits at positions specified by mask to an I2C reg. Bits that are in * the same position as 1's in "mask" are cleared and set to "value". Bits - * that are in the same position as 0's in "mask" are preserved, regardless + * that are in the same position as 0's in "mask" are preserved, regardless * of their respective state in "value". */ -static int +static int i2c_w_mask(struct usb_ov511 *ov, unsigned char reg, unsigned char value, @@ -1132,7 +1123,7 @@ * when calling this. This should not be called from outside the i2c I/O * functions. 
*/ -static inline int +static inline int i2c_set_slave_internal(struct usb_ov511 *ov, unsigned char slave) { int rc; @@ -1146,8 +1137,10 @@ return 0; } +#if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) + /* Write to a specific I2C slave ID and register, using the specified mask */ -static int +static int i2c_w_slave(struct usb_ov511 *ov, unsigned char slave, unsigned char reg, @@ -1166,7 +1159,7 @@ out: /* Restore primary IDs */ - if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0) + if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0) err("Couldn't restore primary I2C slave"); up(&ov->i2c_lock); @@ -1174,7 +1167,7 @@ } /* Read from a specific I2C slave ID and register */ -static int +static int i2c_r_slave(struct usb_ov511 *ov, unsigned char slave, unsigned char reg) @@ -1194,15 +1187,17 @@ out: /* Restore primary IDs */ - if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0) + if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0) err("Couldn't restore primary I2C slave"); up(&ov->i2c_lock); return rc; } +#endif /* defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) */ + /* Sets I2C read and write slave IDs. 
Returns <0 for error */ -static int +static int ov51x_set_slave_ids(struct usb_ov511 *ov, unsigned char sid) { int rc; @@ -1221,7 +1216,7 @@ return rc; } -static int +static int write_regvals(struct usb_ov511 *ov, struct ov511_regvals * pRegvals) { int rc; @@ -1242,8 +1237,8 @@ return 0; } -#ifdef OV511_DEBUG -static void +#ifdef OV511_DEBUG +static void dump_i2c_range(struct usb_ov511 *ov, int reg1, int regn) { int i; @@ -1251,18 +1246,18 @@ for (i = reg1; i <= regn; i++) { rc = i2c_r(ov, i); - info("Sensor[0x%X] = 0x%X", i, rc); + info("Sensor[0x%02X] = 0x%02X", i, rc); } } -static void +static void dump_i2c_regs(struct usb_ov511 *ov) { info("I2C REGS"); dump_i2c_range(ov, 0x00, 0x7C); } -static void +static void dump_reg_range(struct usb_ov511 *ov, int reg1, int regn) { int i; @@ -1270,12 +1265,12 @@ for (i = reg1; i <= regn; i++) { rc = reg_r(ov, i); - info("OV511[0x%X] = 0x%X", i, rc); + info("OV511[0x%02X] = 0x%02X", i, rc); } } /* FIXME: Should there be an OV518 version of this? */ -static void +static void ov511_dump_regs(struct usb_ov511 *ov) { info("CAMERA INTERFACE REGS"); @@ -1302,29 +1297,15 @@ } #endif -/********************************************************************** - * - * Kernel I2C Interface (not supported with OV518/OV518+) - * - **********************************************************************/ - -/* For as-yet unimplemented I2C interface */ -static void -call_i2c_clients(struct usb_ov511 *ov, unsigned int cmd, - void *arg) -{ - /* Do nothing */ -} - /*****************************************************************************/ /* Temporarily stops OV511 from functioning. Must do this before changing * registers while the camera is streaming */ -static inline int +static inline int ov51x_stop(struct usb_ov511 *ov) { PDEBUG(4, "stopping"); - ov->stopped = 1; + ov->stopped = 1; if (ov->bclass == BCL_OV518) return (reg_w_mask(ov, R51x_SYS_RESET, 0x3a, 0x3a)); else @@ -1333,12 +1314,12 @@ /* Restarts OV511 after ov511_stop() is called. 
Has no effect if it is not * actually stopped (for performance). */ -static inline int +static inline int ov51x_restart(struct usb_ov511 *ov) { if (ov->stopped) { PDEBUG(4, "restarting"); - ov->stopped = 0; + ov->stopped = 0; /* Reinitialize the stream */ if (ov->bclass == BCL_OV518) @@ -1351,7 +1332,7 @@ } /* Resets the hardware snapshot button */ -static void +static void ov51x_clear_snapshot(struct usb_ov511 *ov) { if (ov->bclass == BCL_OV511) { @@ -1363,12 +1344,12 @@ } else { err("clear snap: invalid bridge type"); } - } +#if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) /* Checks the status of the snapshot button. Returns 1 if it was pressed since * it was last cleared, and zero in all other cases (including errors) */ -static int +static int ov51x_check_snapshot(struct usb_ov511 *ov) { int ret, status = 0; @@ -1388,19 +1369,20 @@ return status; } +#endif /* This does an initial reset of an OmniVision sensor and ensures that I2C * is synchronized. Returns <0 for failure. 
*/ -static int +static int init_ov_sensor(struct usb_ov511 *ov) { int i, success; - /* Reset the sensor */ + /* Reset the sensor */ if (i2c_w(ov, 0x12, 0x80) < 0) return -EIO; - /* Wait for it to initialize */ + /* Wait for it to initialize */ schedule_timeout (1 + 150 * HZ / 1000); for (i = 0, success = 0; i < i2c_detect_tries && !success; i++) { @@ -1410,9 +1392,9 @@ continue; } - /* Reset the sensor */ + /* Reset the sensor */ if (i2c_w(ov, 0x12, 0x80) < 0) return -EIO; - /* Wait for it to initialize */ + /* Wait for it to initialize */ schedule_timeout(1 + 150 * HZ / 1000); /* Dummy read to sync I2C */ if (i2c_r(ov, 0x00) < 0) return -EIO; @@ -1420,13 +1402,13 @@ if (!success) return -EIO; - + PDEBUG(1, "I2C synced in %d attempt(s)", i); return 0; } -static int +static int ov511_set_packet_size(struct usb_ov511 *ov, int size) { int alt, mult; @@ -1468,7 +1450,7 @@ if (reg_w(ov, R51x_FIFO_PSIZE, mult) < 0) return -EIO; - + if (usb_set_interface(ov->dev, ov->iface, alt) < 0) { err("Set packet size: set interface error"); return -EBUSY; @@ -1488,7 +1470,7 @@ /* Note: Unlike the OV511/OV511+, the size argument does NOT include the * optional packet number byte. The actual size *is* stored in ov->packet_size, * though. */ -static int +static int ov518_set_packet_size(struct usb_ov511 *ov, int size) { int alt; @@ -1550,7 +1532,6 @@ int rc = 0; if (!ov->compress_inited) { - reg_w(ov, 0x70, phy); reg_w(ov, 0x71, phuv); reg_w(ov, 0x72, pvy); @@ -1568,7 +1549,7 @@ } ov->compress_inited = 1; -out: +out: return rc; } @@ -1579,7 +1560,6 @@ int rc = 0; if (!ov->compress_inited) { - if (ov518_upload_quan_tables(ov) < 0) { err("Error uploading quantization tables"); rc = -EIO; @@ -1588,7 +1568,7 @@ } ov->compress_inited = 1; -out: +out: return rc; } @@ -2130,7 +2110,7 @@ #endif /* CONFIG_PROC_FS && CONFIG_VIDEO_PROC_FS */ /* Turns on or off the LED. 
Only has an effect with OV511+/OV518(+) */ -static inline void +static inline void ov51x_led_control(struct usb_ov511 *ov, int enable) { PDEBUG(4, " (%s)", enable ? "turn on" : "turn off"); @@ -2181,7 +2161,7 @@ i2c_w_mask(ov, 0x2a, sixty?0x00:0x80, 0x80); i2c_w(ov, 0x2b, sixty?0x00:0xac); i2c_w_mask(ov, 0x76, 0x01, 0x01); - break; + break; case SEN_OV6620: case SEN_OV6630: i2c_w(ov, 0x2b, sixty?0xa8:0x28); @@ -2269,7 +2249,7 @@ */ static inline int sensor_set_auto_exposure(struct usb_ov511 *ov, int enable) -{ +{ PDEBUG(4, " (%s)", enable ? "turn on" : "turn off"); switch (ov->sensor) { @@ -2281,7 +2261,7 @@ case SEN_OV76BE: case SEN_OV8600: i2c_w_mask(ov, 0x13, enable?0x01:0x00, 0x01); - break; + break; case SEN_OV6630: i2c_w_mask(ov, 0x28, enable?0x00:0x10, 0x10); break; @@ -2318,7 +2298,7 @@ i2c_w_mask(ov, 0x68, enable?0xe0:0xc0, 0xe0); i2c_w_mask(ov, 0x29, enable?0x08:0x00, 0x08); i2c_w_mask(ov, 0x28, enable?0x02:0x00, 0x02); - break; + break; case SEN_OV6620: i2c_w_mask(ov, 0x4e, enable?0xe0:0xc0, 0xe0); i2c_w_mask(ov, 0x29, enable?0x08:0x00, 0x08); @@ -2378,7 +2358,7 @@ /* Returns number of bits per pixel (regardless of where they are located; * planar or not), or zero for unsupported format. */ -static inline int +static inline int get_depth(int palette) { switch (palette) { @@ -2390,7 +2370,7 @@ } /* Bytes per frame. Used by read(). 
Return of 0 indicates error */ -static inline long int +static inline long int get_frame_length(struct ov511_frame *frame) { if (!frame) @@ -2785,12 +2765,12 @@ return -EINVAL; } else { hi_res = 0; - } + } if (ov51x_stop(ov) < 0) return -EIO; - /******** Set the mode ********/ + /******** Set the mode ********/ reg_w(ov, 0x2b, 0); reg_w(ov, 0x2c, 0); @@ -2906,10 +2886,10 @@ rc = -EINVAL; break; case SEN_SAA7111A: -// rc = mode_init_saa_sensor_regs(ov, width, height, mode, +// rc = mode_init_saa_sensor_regs(ov, width, height, mode, // sub_flag); - PDEBUG(1, "SAA status = 0X%x", i2c_r(ov, 0x1f)); + PDEBUG(1, "SAA status = 0x%02X", i2c_r(ov, 0x1f)); break; default: err("Unknown sensor"); @@ -2952,7 +2932,7 @@ /* This sets the default image parameters. This is useful for apps that use * read() and do not set these. */ -static int +static int ov51x_set_default_params(struct usb_ov511 *ov) { int i; @@ -2988,7 +2968,7 @@ **********************************************************************/ /* Set analog input port of decoder */ -static int +static int decoder_set_input(struct usb_ov511 *ov, int input) { PDEBUG(4, "port %d", input); @@ -3010,7 +2990,7 @@ } /* Get ASCII name of video input */ -static int +static int decoder_get_input_name(struct usb_ov511 *ov, int input, char *name) { switch (ov->sensor) { @@ -3022,7 +3002,6 @@ sprintf(name, "CVBS-%d", input); else // if (input < 8) sprintf(name, "S-Video-%d", input - 4); - break; } default: @@ -3033,7 +3012,7 @@ } /* Set norm (NTSC, PAL, SECAM, AUTO) */ -static int +static int decoder_set_norm(struct usb_ov511 *ov, int norm) { PDEBUG(4, "%d", norm); @@ -3048,7 +3027,7 @@ reg_e = 0x00; /* NTSC M / PAL BGHI */ } else if (norm == VIDEO_MODE_PAL) { reg_8 = 0x00; /* 50 Hz */ - reg_e = 0x00; /* NTSC M / PAL BGHI */ + reg_e = 0x00; /* NTSC M / PAL BGHI */ } else if (norm == VIDEO_MODE_AUTO) { reg_8 = 0x80; /* Auto field detect */ reg_e = 0x00; /* NTSC M / PAL BGHI */ @@ -3079,7 +3058,7 @@ /* Copies a 64-byte segment at pIn to 
an 8x8 block at pOut. The width of the * image at pOut is specified by w. */ -static inline void +static inline void make_8x8(unsigned char *pIn, unsigned char *pOut, int w) { unsigned char *pOut1 = pOut; @@ -3092,7 +3071,6 @@ } pOut += w; } - } /* @@ -3214,7 +3192,7 @@ * accordingly. Returns -ENXIO if decompressor is not available, otherwise * returns 0 if no other error. */ -static int +static int request_decompressor(struct usb_ov511 *ov) { if (!ov) @@ -3270,7 +3248,7 @@ /* Unlocks decompression module and nulls ov->decomp_ops. Safe to call even * if ov->decomp_ops is NULL. */ -static void +static void release_decompressor(struct usb_ov511 *ov) { int released = 0; /* Did we actually do anything? */ @@ -3286,14 +3264,14 @@ } ov->decomp_ops = NULL; - + unlock_kernel(); if (released) PDEBUG(3, "Decompressor released"); } -static void +static void decompress(struct usb_ov511 *ov, struct ov511_frame *frame, unsigned char *pIn0, unsigned char *pOut0) { @@ -3303,7 +3281,7 @@ PDEBUG(4, "Decompressing %d bytes", frame->bytes_recvd); - if (frame->format == VIDEO_PALETTE_GREY + if (frame->format == VIDEO_PALETTE_GREY && ov->decomp_ops->decomp_400) { int ret = ov->decomp_ops->decomp_400( pIn0, @@ -3313,7 +3291,7 @@ frame->rawheight, frame->bytes_recvd); PDEBUG(4, "DEBUG: decomp_400 returned %d", ret); - } else if (frame->format != VIDEO_PALETTE_GREY + } else if (frame->format != VIDEO_PALETTE_GREY && ov->decomp_ops->decomp_420) { int ret = ov->decomp_ops->decomp_420( pIn0, @@ -3328,6 +3306,12 @@ } } +/********************************************************************** + * + * Format conversion + * + **********************************************************************/ + /* Fuses even and odd fields together, and doubles width. 
* INPUT: an odd field followed by an even field at pIn0, in YUV planar format * OUTPUT: a normal YUV planar image, with correct aspect ratio @@ -3432,7 +3416,7 @@ if (frame->compressed) decompress(ov, frame, frame->rawdata, frame->tempdata); else - yuv420raw_to_yuv420p(frame, frame->rawdata, + yuv420raw_to_yuv420p(frame, frame->rawdata, frame->tempdata); deinterlace(frame, RAWFMT_YUV420, frame->tempdata, @@ -3452,11 +3436,11 @@ * 3. Convert from YUV planar to destination format, if necessary * 4. Fix the RGB offset, if necessary */ -static void +static void ov51x_postprocess(struct usb_ov511 *ov, struct ov511_frame *frame) { if (dumppix) { - memset(frame->data, 0, + memset(frame->data, 0, MAX_DATA_SIZE(ov->maxwidth, ov->maxheight)); PDEBUG(4, "Dumping %d bytes", frame->bytes_recvd); memcpy(frame->data, frame->rawdata, frame->bytes_recvd); @@ -3482,7 +3466,7 @@ * **********************************************************************/ -static inline void +static inline void ov511_move_data(struct usb_ov511 *ov, unsigned char *in, int n) { int num, offset; @@ -3518,7 +3502,7 @@ /* Frame end */ if (in[8] & 0x80) { - ts = (struct timeval *)(frame->data + ts = (struct timeval *)(frame->data + MAX_FRAME_SIZE(ov->maxwidth, ov->maxheight)); do_gettimeofday(ts); @@ -3656,7 +3640,7 @@ } } -static inline void +static inline void ov518_move_data(struct usb_ov511 *ov, unsigned char *in, int n) { int max_raw = MAX_RAW_DATA_SIZE(ov->maxwidth, ov->maxheight); @@ -3796,7 +3780,7 @@ } else { if (frame->bytes_recvd + copied + 8 <= max_raw) { - memcpy(frame->rawdata + memcpy(frame->rawdata + frame->bytes_recvd + copied, in + read, 8); copied += 8; @@ -3810,7 +3794,7 @@ } } -static void +static void ov51x_isoc_irq(struct urb *urb) { int i; @@ -3896,7 +3880,7 @@ * ***************************************************************************/ -static int +static int ov51x_init_isoc(struct usb_ov511 *ov) { struct urb *urb; @@ -3991,7 +3975,7 @@ return 0; } -static void +static void 
ov51x_unlink_isoc(struct usb_ov511 *ov) { int n; @@ -4006,7 +3990,7 @@ } } -static void +static void ov51x_stop_isoc(struct usb_ov511 *ov) { if (!ov->streaming || !ov->dev) @@ -4024,7 +4008,7 @@ ov51x_unlink_isoc(ov); } -static int +static int ov51x_new_frame(struct usb_ov511 *ov, int framenum) { struct ov511_frame *frame; @@ -4046,7 +4030,7 @@ frame = &ov->frame[framenum]; - PDEBUG(4, "framenum = %d, width = %d, height = %d", framenum, + PDEBUG(4, "framenum = %d, width = %d, height = %d", framenum, frame->width, frame->height); frame->grabstate = FRAME_GRABBING; @@ -4075,12 +4059,12 @@ * ***************************************************************************/ -/* +/* * - You must acquire buf_lock before entering this function. * - Because this code will free any non-null pointer, you must be sure to null * them if you explicitly free them somewhere else! */ -static void +static void ov51x_do_dealloc(struct usb_ov511 *ov) { int i; @@ -4124,7 +4108,7 @@ PDEBUG(4, "leaving"); } -static int +static int ov51x_alloc(struct usb_ov511 *ov) { int i; @@ -4171,12 +4155,12 @@ for (i = 0; i < OV511_NUMFRAMES; i++) { ov->frame[i].data = ov->fbuf + i * MAX_DATA_SIZE(w, h); - ov->frame[i].rawdata = ov->rawfbuf + ov->frame[i].rawdata = ov->rawfbuf + i * MAX_RAW_DATA_SIZE(w, h); - ov->frame[i].tempdata = ov->tempfbuf + ov->frame[i].tempdata = ov->tempfbuf + i * MAX_RAW_DATA_SIZE(w, h); - ov->frame[i].compbuf = + ov->frame[i].compbuf = (unsigned char *) __get_free_page(GFP_KERNEL); if (!ov->frame[i].compbuf) goto error; @@ -4196,7 +4180,7 @@ return -ENOMEM; } -static void +static void ov51x_dealloc(struct usb_ov511 *ov, int now) { PDEBUG(4, "entered"); @@ -4212,7 +4196,7 @@ * ***************************************************************************/ -static int +static int ov51x_v4l1_open(struct inode *inode, struct file *file) { struct video_device *vdev = video_devdata(file); @@ -4224,17 +4208,18 @@ down(&ov->lock); err = -EBUSY; - if (ov->user) + if (ov->user) goto out; - 
err = -ENOMEM; - if (ov51x_alloc(ov)) + err = ov51x_alloc(ov); + if (err < 0) goto out; ov->sub_flag = 0; /* In case app doesn't set them... */ - if (ov51x_set_default_params(ov) < 0) + err = ov51x_set_default_params(ov); + if (err < 0) goto out; /* Make sure frames are reset */ @@ -4243,7 +4228,7 @@ ov->frame[i].bytes_read = 0; } - /* If compression is on, make sure now that a + /* If compression is on, make sure now that a * decompressor can be loaded */ if (ov->compress && !ov->decomp_ops) { err = request_decompressor(ov); @@ -4268,14 +4253,14 @@ return err; } -static int +static int ov51x_v4l1_close(struct inode *inode, struct file *file) { struct video_device *vdev = file->private_data; struct usb_ov511 *ov = vdev->priv; PDEBUG(4, "ov511_close"); - + down(&ov->lock); ov->user--; @@ -4303,6 +4288,7 @@ kfree(ov); ov = NULL; } + file->private_data = NULL; return 0; @@ -4318,7 +4304,7 @@ PDEBUG(5, "IOCtl: 0x%X", cmd); if (!ov->dev) - return -EIO; + return -EIO; switch (cmd) { case VIDIOCGCAP: @@ -4331,10 +4317,8 @@ sprintf(b->name, "%s USB Camera", symbolic(brglist, ov->bridge)); b->type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE; - if (ov->has_tuner) - b->type |= VID_TYPE_TUNER; b->channels = ov->num_inputs; - b->audios = ov->has_audio_proc ? 1:0; + b->audios = 0; b->maxwidth = ov->maxwidth; b->maxheight = ov->maxheight; b->minwidth = ov->minwidth; @@ -4354,11 +4338,10 @@ } v->norm = ov->norm; - v->type = (ov->has_tuner) ? VIDEO_TYPE_TV : VIDEO_TYPE_CAMERA; - v->flags = (ov->has_tuner) ? VIDEO_VC_TUNER : 0; - v->flags |= (ov->has_audio_proc) ? VIDEO_VC_AUDIO : 0; + v->type = VIDEO_TYPE_CAMERA; + v->flags = 0; // v->flags |= (ov->has_decoder) ? VIDEO_VC_NORM : 0; - v->tuners = (ov->has_tuner) ? 
1:0; + v->tuners = 0; decoder_get_input_name(ov, v->channel, v->name); return 0; @@ -4585,7 +4568,7 @@ return -EINVAL; } - if (vm->width > ov->maxwidth + if (vm->width > ov->maxwidth || vm->height > ov->maxheight) { err("VIDIOCMCAPTURE: requested dimensions too big"); return -EINVAL; @@ -4641,7 +4624,6 @@ struct ov511_frame *frame; int rc; - if (fnum >= OV511_NUMFRAMES) { err("VIDIOCSYNC: invalid frame (%d)", fnum); return -EINVAL; @@ -4676,7 +4658,7 @@ return ret; goto redo; } - /* Fall through */ + /* Fall through */ case FRAME_DONE: if (ov->snap_enabled && !frame->snapshot) { int ret; @@ -4728,92 +4710,6 @@ return 0; } - case VIDIOCGTUNER: - { - struct video_tuner *v = arg; - - PDEBUG(4, "VIDIOCGTUNER"); - - if (!ov->has_tuner || v->tuner) // Only tuner 0 - return -EINVAL; - - strcpy(v->name, "Television"); - - // FIXME: Need a way to get the real values - v->rangelow = 0; - v->rangehigh = ~0; - - v->flags = VIDEO_TUNER_PAL | VIDEO_TUNER_NTSC - | VIDEO_TUNER_SECAM; - v->mode = 0; /* FIXME: Not sure what this is yet */ - v->signal = 0xFFFF; /* unknown */ - - call_i2c_clients(ov, cmd, v); - - return 0; - } - case VIDIOCSTUNER: - { - struct video_tuner *v = arg; - int err; - - PDEBUG(4, "VIDIOCSTUNER"); - - /* Only no or one tuner for now */ - if (!ov->has_tuner || v->tuner) - return -EINVAL; - - /* and it only has certain valid modes */ - if (v->mode != VIDEO_MODE_PAL && - v->mode != VIDEO_MODE_NTSC && - v->mode != VIDEO_MODE_SECAM) - return -EOPNOTSUPP; - - /* Is this right/necessary? 
*/ - err = decoder_set_norm(ov, v->mode); - if (err) - return err; - - call_i2c_clients(ov, cmd, v); - - return 0; - } - case VIDIOCGFREQ: - { - unsigned long v = *((unsigned long *) arg); - - PDEBUG(4, "VIDIOCGFREQ"); - - if (!ov->has_tuner) - return -EINVAL; - - v = ov->freq; -#if 0 - /* FIXME: this is necessary for testing */ - v = 46*16; -#endif - return 0; - } - case VIDIOCSFREQ: - { - unsigned long v = *((unsigned long *) arg); - - PDEBUG(4, "VIDIOCSFREQ: %lx", v); - - if (!ov->has_tuner) - return -EINVAL; - - ov->freq = v; - call_i2c_clients(ov, cmd, &v); - - return 0; - } - case VIDIOCGAUDIO: - case VIDIOCSAUDIO: - { - /* FIXME: Implement this... */ - return 0; - } default: PDEBUG(3, "Unsupported IOCtl: 0x%X", cmd); return -ENOIOCTLCMD; @@ -4822,7 +4718,7 @@ return 0; } -static int +static int ov51x_v4l1_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { @@ -4839,7 +4735,7 @@ return rc; } -static inline int +static inline int ov51x_v4l1_read(struct file *file, char *buf, size_t cnt, loff_t *ppos) { struct video_device *vdev = file->private_data; @@ -4904,7 +4800,7 @@ /* Wait while we're grabbing the image */ PDEBUG(4, "Waiting image grabbing"); - rc = wait_event_interruptible(frame->wq, + rc = wait_event_interruptible(frame->wq, (frame->grabstate == FRAME_DONE) || (frame->grabstate == FRAME_ERROR)); @@ -4951,7 +4847,7 @@ get_frame_length(frame)); /* copy bytes to user space; we allow for partials reads */ -// if ((count + frame->bytes_read) +// if ((count + frame->bytes_read) // > get_frame_length((struct ov511_frame *)frame)) // count = frame->scanlength - frame->bytes_read; @@ -5036,25 +4932,25 @@ } static struct file_operations ov511_fops = { - owner: THIS_MODULE, - open: ov51x_v4l1_open, - release: ov51x_v4l1_close, - read: ov51x_v4l1_read, - mmap: ov51x_v4l1_mmap, - ioctl: ov51x_v4l1_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = ov51x_v4l1_open, + .release = ov51x_v4l1_close, + .read = ov51x_v4l1_read, 
+ .mmap = ov51x_v4l1_mmap, + .ioctl = ov51x_v4l1_ioctl, + .llseek = no_llseek, }; static struct video_device vdev_template = { - owner: THIS_MODULE, - name: "OV511 USB Camera", - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_OV511, - fops: &ov511_fops, + .owner = THIS_MODULE, + .name = "OV511 USB Camera", + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_OV511, + .fops = &ov511_fops, }; #if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) -static int +static int ov51x_control_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long ularg) { @@ -5278,7 +5174,7 @@ /* This initializes the OV7610, OV7620, or OV76BE sensor. The OV76BE uses * the same register settings as the OV7610, since they are very similar. */ -static int +static int ov7xx0_configure(struct usb_ov511 *ov) { int i, success; @@ -5429,7 +5325,7 @@ err("this to " EMAIL); err("This is only a warning. You can attempt to use"); err("your camera anyway"); -// Only issue a warning for now +// Only issue a warning for now // return -1; } else { PDEBUG(1, "OV7xx0 initialized (method 2, %dx)", i+1); @@ -5449,8 +5345,6 @@ /* I don't know what's different about the 76BE yet. */ if (i2c_r(ov, 0x15) & 1) { info("Sensor is an OV7620AE"); - info("PLEASE REPORT THE EXISTENCE OF THIS SENSOR TO"); - info("THE DRIVER AUTHOR"); } else { info("Sensor is an OV76BE"); } @@ -5498,7 +5392,7 @@ } /* This initializes the OV6620, OV6630, OV6630AE, or OV6630AF sensor. 
*/ -static int +static int ov6xx0_configure(struct usb_ov511 *ov) { int rc; @@ -5513,7 +5407,7 @@ { OV511_I2C_BUS, 0x0c, 0x24 }, { OV511_I2C_BUS, 0x0d, 0x24 }, { OV511_I2C_BUS, 0x0f, 0x15 }, /* COMS */ - { OV511_I2C_BUS, 0x10, 0x75 }, /* AEC Exposure time */ + { OV511_I2C_BUS, 0x10, 0x75 }, /* AEC Exposure time */ { OV511_I2C_BUS, 0x12, 0x24 }, /* Enable AGC */ { OV511_I2C_BUS, 0x14, 0x04 }, /* 0x16: 0x06 helps frame stability with moving objects */ @@ -5525,11 +5419,11 @@ { OV511_I2C_BUS, 0x2a, 0x04 }, /* Disable framerate adjust */ // { OV511_I2C_BUS, 0x2b, 0xac }, /* Framerate; Set 2a[7] first */ { OV511_I2C_BUS, 0x2d, 0x99 }, - { OV511_I2C_BUS, 0x33, 0xa0 }, /* Color Procesing Parameter */ + { OV511_I2C_BUS, 0x33, 0xa0 }, /* Color Procesing Parameter */ { OV511_I2C_BUS, 0x34, 0xd2 }, /* Max A/D range */ { OV511_I2C_BUS, 0x38, 0x8b }, { OV511_I2C_BUS, 0x39, 0x40 }, - + { OV511_I2C_BUS, 0x3c, 0x39 }, /* Enable AEC mode changing */ { OV511_I2C_BUS, 0x3c, 0x3c }, /* Change AEC mode */ { OV511_I2C_BUS, 0x3c, 0x24 }, /* Disable AEC mode changing */ @@ -5609,7 +5503,7 @@ * control the color balance */ // /*OK?*/ { OV511_I2C_BUS, 0x4a, 0x80 }, // Check these // /*OK?*/ { OV511_I2C_BUS, 0x4b, 0x80 }, -// /*U*/ { OV511_I2C_BUS, 0x4c, 0xd0 }, +// /*U*/ { OV511_I2C_BUS, 0x4c, 0xd0 }, /*d2?*/ { OV511_I2C_BUS, 0x4d, 0x10 }, /* This reduces noise a bit */ /*c1?*/ { OV511_I2C_BUS, 0x4e, 0x40 }, /*04?*/ { OV511_I2C_BUS, 0x4f, 0x07 }, @@ -5627,7 +5521,7 @@ }; PDEBUG(4, "starting sensor configuration"); - + if (init_ov_sensor(ov) < 0) { err("Failed to read sensor ID. You might not have an OV6xx0,"); err("or it may be not responding. Report this to " EMAIL); @@ -5642,7 +5536,7 @@ if (rc < 0) { err("Error detecting sensor type"); return -1; - } + } if ((rc & 3) == 0) ov->sensor = SEN_OV6630; @@ -5676,7 +5570,7 @@ if (write_regvals(ov, aRegvalsNorm6x30)) return -1; } - + return 0; } @@ -5738,7 +5632,7 @@ } /* This initializes the SAA7111A video decoder. 
*/ -static int +static int saa7111a_configure(struct usb_ov511 *ov) { int rc; @@ -5890,12 +5784,8 @@ err("Also include the output of the detection process."); } - if (ov->customid == 6) { /* USB Life TV (NTSC) */ - ov->tuner_type = 8; /* Temic 4036FY5 3X 1981 */ - } else if (ov->customid == 70) { /* USB Life TV (PAL/SECAM) */ - ov->tuner_type = 3; /* Philips FI1216MF */ + if (ov->customid == 70) /* USB Life TV (PAL/SECAM) */ ov->pal = 1; - } if (write_regvals(ov, aRegvalsInit511)) goto error; @@ -5917,7 +5807,7 @@ ov->packet_numbering = 1; ov511_set_packet_size(ov, 0); - ov->snap_enabled = snapshot; + ov->snap_enabled = snapshot; /* Test for 7xx0 */ PDEBUG(3, "Testing for 0V7xx0"); @@ -5994,7 +5884,7 @@ } /* This initializes the OV518/OV518+ and the sensor */ -static int +static int ov518_configure(struct usb_ov511 *ov) { /* For 518 and 518+ */ @@ -6190,7 +6080,6 @@ ov->lightfreq = lightfreq; ov->num_inputs = 1; /* Video decoder init functs. change this */ ov->stop_during_set = !fastset; - ov->tuner_type = tuner; ov->backlight = backlight; ov->mirror = mirror; ov->auto_brt = autobright; @@ -6221,7 +6110,7 @@ ov->bclass = BCL_OV511; break; default: - err("Unknown product ID 0x%x", dev->descriptor.idProduct); + err("Unknown product ID 0x%04x", dev->descriptor.idProduct); goto error_dealloc; } @@ -6373,11 +6262,11 @@ } static struct usb_driver ov511_driver = { - owner: THIS_MODULE, - name: "ov511", - id_table: device_table, - probe: ov51x_probe, - disconnect: ov51x_disconnect + .owner = THIS_MODULE, + .name = "ov511", + .id_table = device_table, + .probe = ov51x_probe, + .disconnect = ov51x_disconnect }; @@ -6388,7 +6277,7 @@ ***************************************************************************/ /* Returns 0 for success */ -int +int ov511_register_decomp_module(int ver, struct ov51x_decomp_ops *ops, int ov518, int mmx) { @@ -6445,7 +6334,7 @@ return -EBUSY; } -void +void ov511_deregister_decomp_module(int ov518, int mmx) { lock_kernel(); @@ -6461,13 +6350,13 @@ 
else ov511_decomp_ops = NULL; } - + MOD_DEC_USE_COUNT; unlock_kernel(); } -static int __init +static int __init usb_ov511_init(void) { #if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS) @@ -6482,7 +6371,7 @@ return 0; } -static void __exit +static void __exit usb_ov511_exit(void) { usb_deregister(&ov511_driver); diff -Nru a/drivers/usb/media/ov511.h b/drivers/usb/media/ov511.h --- a/drivers/usb/media/ov511.h Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/ov511.h Sat Jul 20 12:12:35 2002 @@ -551,17 +551,11 @@ int num_inputs; /* Number of inputs */ int norm; /* NTSC / PAL / SECAM */ int has_decoder; /* Device has a video decoder */ - int has_tuner; /* Device has a TV tuner */ - int has_audio_proc; /* Device has an audio processor */ - int freq; /* Current tuner frequency */ - int tuner_type; /* Specific tuner model */ int pal; /* Device is designed for PAL resolution */ - /* I2C interface to kernel */ + /* I2C interface */ struct semaphore i2c_lock; /* Protect I2C controller regs */ unsigned char primary_i2c_slave; /* I2C write id of sensor */ - unsigned char tuner_i2c_slave; /* I2C write id of tuner */ - unsigned char audio_i2c_slave; /* I2C write id of audio processor */ /* Control transaction stuff */ unsigned char *cbuf; /* Buffer for payload */ diff -Nru a/drivers/usb/media/pwc-if.c b/drivers/usb/media/pwc-if.c --- a/drivers/usb/media/pwc-if.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/pwc-if.c Sat Jul 20 12:12:35 2002 @@ -91,10 +91,10 @@ static struct usb_driver pwc_driver = { - name: "Philips webcam", /* name */ - id_table: pwc_device_table, - probe: usb_pwc_probe, /* probe() */ - disconnect: usb_pwc_disconnect, /* disconnect() */ + .name = "Philips webcam", /* name */ + .id_table = pwc_device_table, + .probe = usb_pwc_probe, /* probe() */ + .disconnect = usb_pwc_disconnect, /* disconnect() */ }; #define MAX_DEV_HINTS 10 @@ -130,21 +130,21 @@ static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma); static struct 
file_operations pwc_fops = { - owner: THIS_MODULE, - open: pwc_video_open, - release: pwc_video_close, - read: pwc_video_read, - poll: pwc_video_poll, - mmap: pwc_video_mmap, - ioctl: pwc_video_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = pwc_video_open, + .release = pwc_video_close, + .read = pwc_video_read, + .poll = pwc_video_poll, + .mmap = pwc_video_mmap, + .ioctl = pwc_video_ioctl, + .llseek = no_llseek, }; static struct video_device pwc_template = { - owner: THIS_MODULE, - name: "Philips Webcam", /* Filled in later */ - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_PWC, - fops: &pwc_fops, + .owner = THIS_MODULE, + .name = "Philips Webcam", /* Filled in later */ + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_PWC, + .fops = &pwc_fops, }; /***************************************************************************/ @@ -1756,40 +1756,40 @@ pdev = (struct pwc_device *)ptr; if (pdev == NULL) { Err("pwc_disconnect() Called without private pointer.\n"); - return; + goto out_err; } if (pdev->udev == NULL) { Err("pwc_disconnect() already called for %p\n", pdev); - return; + goto out_err; } if (pdev->udev != udev) { Err("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n"); - return; + goto out_err; } #ifdef PWC_MAGIC if (pdev->magic != PWC_MAGIC) { Err("pwc_disconnect() Magic number failed. Consult your scrolls and try again.\n"); - return; + goto out_err; } -#endif - +#endif + pdev->unplugged = 1; if (pdev->vdev != NULL) { - video_unregister_device(pdev->vdev); + video_unregister_device(pdev->vdev); if (pdev->vopen) { Info("Disconnected while device/video is open!\n"); - + /* Wake up any processes that might be waiting for a frame, let them return an error condition */ wake_up(&pdev->frameq); - + /* Wait until we get a 'go' from _close(). This used to have a gigantic race condition, since we kfree() - stuff here, but we have to wait until close() - is finished. + stuff here, but we have to wait until close() + is finished. 
*/ - + Trace(TRACE_PROBE, "Sleeping on remove_ok.\n"); add_wait_queue(&pdev->remove_ok, &wait); set_current_state(TASK_UNINTERRUPTIBLE); @@ -1815,6 +1815,7 @@ device_hint[hint].pdev = NULL; pdev->udev = NULL; +out_err: unlock_kernel(); kfree(pdev); } diff -Nru a/drivers/usb/media/se401.c b/drivers/usb/media/se401.c --- a/drivers/usb/media/se401.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/se401.c Sat Jul 20 12:12:35 2002 @@ -1283,20 +1283,20 @@ } static struct file_operations se401_fops = { - owner: THIS_MODULE, - open: se401_open, - release: se401_close, - read: se401_read, - mmap: se401_mmap, - ioctl: se401_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = se401_open, + .release = se401_close, + .read = se401_read, + .mmap = se401_mmap, + .ioctl = se401_ioctl, + .llseek = no_llseek, }; static struct video_device se401_template = { - owner: THIS_MODULE, - name: "se401 USB camera", - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_SE401, - fops: &se401_fops, + .owner = THIS_MODULE, + .name = "se401 USB camera", + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_SE401, + .fops = &se401_fops, }; @@ -1523,7 +1523,7 @@ static struct usb_driver se401_driver = { name: "se401", id_table: device_table, - probe: se401_probe, + .probe = se401_probe, disconnect: se401_disconnect }; diff -Nru a/drivers/usb/media/stv680.c b/drivers/usb/media/stv680.c --- a/drivers/usb/media/stv680.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/media/stv680.c Sat Jul 20 12:12:34 2002 @@ -1432,20 +1432,20 @@ } /* stv680_read */ static struct file_operations stv680_fops = { - owner: THIS_MODULE, - open: stv_open, - release: stv_close, - read: stv680_read, - mmap: stv680_mmap, - ioctl: stv680_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = stv_open, + .release = stv_close, + .read = stv680_read, + .mmap = stv680_mmap, + .ioctl = stv680_ioctl, + .llseek = no_llseek, }; static struct video_device stv680_template = { - owner: THIS_MODULE, - name: "STV0680 USB 
camera", - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_SE401, - fops: &stv680_fops, + .owner = THIS_MODULE, + .name = "STV0680 USB camera", + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_SE401, + .fops = &stv680_fops, }; static void *stv680_probe (struct usb_device *dev, unsigned int ifnum, const struct usb_device_id *id) @@ -1545,10 +1545,10 @@ } static struct usb_driver stv680_driver = { - name: "stv680", - probe: stv680_probe, - disconnect: stv680_disconnect, - id_table: device_table + .name = "stv680", + .probe = stv680_probe, + .disconnect = stv680_disconnect, + .id_table = device_table }; /******************************************************************** diff -Nru a/drivers/usb/media/usbvideo.c b/drivers/usb/media/usbvideo.c --- a/drivers/usb/media/usbvideo.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/usbvideo.c Sat Jul 20 12:12:35 2002 @@ -1054,19 +1054,19 @@ } static struct file_operations usbvideo_fops = { - owner: THIS_MODULE, - open: usbvideo_v4l_open, - release: usbvideo_v4l_close, - read: usbvideo_v4l_read, - mmap: usbvideo_v4l_mmap, - ioctl: usbvideo_v4l_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = usbvideo_v4l_open, + .release =usbvideo_v4l_close, + .read = usbvideo_v4l_read, + .mmap = usbvideo_v4l_mmap, + .ioctl = usbvideo_v4l_ioctl, + .llseek = no_llseek, }; static struct video_device usbvideo_template = { - owner: THIS_MODULE, - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_CPIA, - fops: &usbvideo_fops, + .owner = THIS_MODULE, + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_CPIA, + .fops = &usbvideo_fops, }; uvd_t *usbvideo_AllocateDevice(usbvideo_t *cams) diff -Nru a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c --- a/drivers/usb/media/vicam.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/media/vicam.c Sat Jul 20 12:12:35 2002 @@ -640,20 +640,20 @@ /* FIXME - vicam_template - important */ static struct file_operations vicam_fops = { - owner: THIS_MODULE, - open: vicam_v4l_open, - release: 
vicam_v4l_close, - read: vicam_v4l_read, - mmap: vicam_v4l_mmap, - ioctl: vicam_v4l_ioctl, - llseek: no_llseek, + .owner = THIS_MODULE, + .open = vicam_v4l_open, + .release = vicam_v4l_close, + .read = vicam_v4l_read, + .mmap = vicam_v4l_mmap, + .ioctl = vicam_v4l_ioctl, + .llseek = no_llseek, }; static struct video_device vicam_template = { - owner: THIS_MODULE, - name: "vicam USB camera", - type: VID_TYPE_CAPTURE, - hardware: VID_HARDWARE_SE401, /* need to ask for own id */ - fops: &vicam_fops, + .owner = THIS_MODULE, + .name = "vicam USB camera", + .type = VID_TYPE_CAPTURE, + .hardware = VID_HARDWARE_SE401, /* need to ask for own id */ + .fops = &vicam_fops, }; /****************************************************************************** @@ -876,11 +876,11 @@ /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver vicam_driver = { - owner: THIS_MODULE, - name: "vicam", - probe: vicam_probe, - disconnect: vicam_disconnect, - id_table: vicam_table, + .owner = THIS_MODULE, + .name = "vicam", + .probe = vicam_probe, + .disconnect = vicam_disconnect, + .id_table = vicam_table, }; /****************************************************************************** diff -Nru a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c --- a/drivers/usb/misc/auerswald.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/misc/auerswald.c Sat Jul 20 12:12:35 2002 @@ -1879,13 +1879,13 @@ /* File operation structure */ static struct file_operations auerswald_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - read: auerchar_read, - write: auerchar_write, - ioctl: auerchar_ioctl, - open: auerchar_open, - release: auerchar_release, + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = auerchar_read, + .write = auerchar_write, + .ioctl = auerchar_ioctl, + .open = auerchar_open, + .release = auerchar_release, }; @@ -2138,10 +2138,10 @@ /* Standard usb driver struct */ static struct usb_driver auerswald_driver = { - name: "auerswald", 
- probe: auerswald_probe, - disconnect: auerswald_disconnect, - id_table: auerswald_ids, + .name = "auerswald", + .probe = auerswald_probe, + .disconnect = auerswald_disconnect, + .id_table = auerswald_ids, }; diff -Nru a/drivers/usb/misc/brlvger.c b/drivers/usb/misc/brlvger.c --- a/drivers/usb/misc/brlvger.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/misc/brlvger.c Sat Jul 20 12:12:35 2002 @@ -230,23 +230,23 @@ static struct file_operations brlvger_fops = { - owner: THIS_MODULE, - llseek: brlvger_llseek, - read: brlvger_read, - write: brlvger_write, - ioctl: brlvger_ioctl, - open: brlvger_open, - release: brlvger_release, - poll: brlvger_poll, + .owner = THIS_MODULE, + .llseek = brlvger_llseek, + .read = brlvger_read, + .write = brlvger_write, + .ioctl = brlvger_ioctl, + .open = brlvger_open, + .release = brlvger_release, + .poll = brlvger_poll, }; static struct usb_driver brlvger_driver = { - owner: THIS_MODULE, - name: "brlvger", - probe: brlvger_probe, - disconnect: brlvger_disconnect, - id_table: brlvger_ids, + .owner = THIS_MODULE, + .name = "brlvger", + .probe = brlvger_probe, + .disconnect = brlvger_disconnect, + .id_table = brlvger_ids, }; static int diff -Nru a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c --- a/drivers/usb/misc/rio500.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/misc/rio500.c Sat Jul 20 12:12:34 2002 @@ -443,11 +443,11 @@ static struct file_operations usb_rio_fops = { - read: read_rio, - write: write_rio, - ioctl: ioctl_rio, - open: open_rio, - release: close_rio, + .read = read_rio, + .write = write_rio, + .ioctl = ioctl_rio, + .open = open_rio, + .release = close_rio, }; static void *probe_rio(struct usb_device *dev, unsigned int ifnum, @@ -525,10 +525,10 @@ MODULE_DEVICE_TABLE (usb, rio_table); static struct usb_driver rio_driver = { - name: "rio500", - probe: probe_rio, - disconnect: disconnect_rio, - id_table: rio_table, + .name = "rio500", + .probe = probe_rio, + .disconnect = disconnect_rio, + .id_table = rio_table, }; int 
usb_rio_init(void) diff -Nru a/drivers/usb/misc/tiglusb.c b/drivers/usb/misc/tiglusb.c --- a/drivers/usb/misc/tiglusb.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/misc/tiglusb.c Sat Jul 20 12:12:34 2002 @@ -295,13 +295,13 @@ /* ----- kernel module registering ------------------------------------ */ static struct file_operations tiglusb_fops = { - owner: THIS_MODULE, - llseek: no_llseek, - read: tiglusb_read, - write: tiglusb_write, - ioctl: tiglusb_ioctl, - open: tiglusb_open, - release: tiglusb_release, + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = tiglusb_read, + .write = tiglusb_write, + .ioctl = tiglusb_ioctl, + .open = tiglusb_open, + .release = tiglusb_release, }; static int tiglusb_find_struct (void) @@ -407,11 +407,11 @@ MODULE_DEVICE_TABLE (usb, tiglusb_ids); static struct usb_driver tiglusb_driver = { - owner: THIS_MODULE, - name: "tiglusb", - probe: tiglusb_probe, - disconnect: tiglusb_disconnect, - id_table: tiglusb_ids, + .owner = THIS_MODULE, + .name = "tiglusb", + .probe = tiglusb_probe, + .disconnect = tiglusb_disconnect, + .id_table = tiglusb_ids, }; /* --- initialisation code ------------------------------------- */ diff -Nru a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c --- a/drivers/usb/misc/uss720.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/misc/uss720.c Sat Jul 20 12:12:34 2002 @@ -646,10 +646,10 @@ static struct usb_driver uss720_driver = { - name: "uss720", - probe: uss720_probe, - disconnect: uss720_disconnect, - id_table: uss720_table, + .name = "uss720", + .probe = uss720_probe, + .disconnect = uss720_disconnect, + .id_table = uss720_table, }; /* --------------------------------------------------------------------- */ diff -Nru a/drivers/usb/net/catc.c b/drivers/usb/net/catc.c --- a/drivers/usb/net/catc.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/net/catc.c Sat Jul 20 12:12:35 2002 @@ -941,10 +941,10 @@ MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { - name: "catc", - probe: 
catc_probe, - disconnect: catc_disconnect, - id_table: catc_id_table, + .name = "catc", + .probe = catc_probe, + .disconnect = catc_disconnect, + .id_table = catc_id_table, }; static int __init catc_init(void) diff -Nru a/drivers/usb/net/cdc-ether.c b/drivers/usb/net/cdc-ether.c --- a/drivers/usb/net/cdc-ether.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/net/cdc-ether.c Sat Jul 20 12:12:34 2002 @@ -1325,10 +1325,10 @@ ////////////////////////////////////////////////////////////////////////////// static struct usb_driver CDCEther_driver = { - name: "CDCEther", - probe: CDCEther_probe, - disconnect: CDCEther_disconnect, - id_table: CDCEther_ids, + .name = "CDCEther", + .probe = CDCEther_probe, + .disconnect = CDCEther_disconnect, + .id_table = CDCEther_ids, }; ////////////////////////////////////////////////////////////////////////////// diff -Nru a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c --- a/drivers/usb/net/kaweth.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/net/kaweth.c Sat Jul 20 12:12:35 2002 @@ -165,11 +165,11 @@ * kaweth_driver ****************************************************************/ static struct usb_driver kaweth_driver = { - owner: THIS_MODULE, - name: "kaweth", - probe: kaweth_probe, - disconnect: kaweth_disconnect, - id_table: usb_klsi_table, + .owner = THIS_MODULE, + .name = "kaweth", + .probe = kaweth_probe, + .disconnect = kaweth_disconnect, + .id_table = usb_klsi_table, }; typedef __u8 eth_addr_t[6]; diff -Nru a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c --- a/drivers/usb/net/pegasus.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/net/pegasus.c Sat Jul 20 12:12:35 2002 @@ -1148,10 +1148,10 @@ } static struct usb_driver pegasus_driver = { - name: driver_name, - probe: pegasus_probe, - disconnect: pegasus_disconnect, - id_table: pegasus_ids, + .name = driver_name, + .probe = pegasus_probe, + .disconnect = pegasus_disconnect, + .id_table = pegasus_ids, }; int __init pegasus_init(void) diff -Nru a/drivers/usb/net/rtl8150.c 
b/drivers/usb/net/rtl8150.c --- a/drivers/usb/net/rtl8150.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/net/rtl8150.c Sat Jul 20 12:12:35 2002 @@ -110,10 +110,10 @@ const struct usb_device_id *id); static struct usb_driver rtl8150_driver = { - name: "rtl8150", - probe: rtl8150_probe, - disconnect: rtl8150_disconnect, - id_table: rtl8150_table, + .name = "rtl8150", + .probe = rtl8150_probe, + .disconnect = rtl8150_disconnect, + .id_table = rtl8150_table, }; /* diff -Nru a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c --- a/drivers/usb/net/usbnet.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/net/usbnet.c Sat Jul 20 12:12:34 2002 @@ -26,12 +26,7 @@ * See the LINUXDEV support. * * - * TODO: - * - * This needs to be retested for bulk queuing problems ... earlier versions - * seemed to find different types of problems in each HCD. Once they're fixed, - * re-enable queues to get higher bandwidth utilization (without needing - * to tweak MTU for larger packets). + * Status: * * - AN2720 ... not widely available, but reportedly works well * @@ -45,8 +40,8 @@ * but the Sharp Zaurus uses an incompatible protocol (extra checksums). * No reason not to merge the Zaurus protocol here too (got patch? :) * - * - For Netchip, use keventd to poll via control requests to detect hardware - * level "carrier detect". + * - For Netchip, should use keventd to poll via control requests to detect + * hardware level "carrier detect". * * - PL-230x ... the initialization protocol doesn't seem to match chip data * sheets, sometimes it's not needed and sometimes it hangs. Prolific has @@ -60,9 +55,9 @@ * * There are reports that bridging gives lower-than-usual throughput. * - * Craft smarter hotplug policy scripts ... ones that know how to arrange + * Need smarter hotplug policy scripts ... ones that know how to arrange * bridging with "brctl", and can handle static and dynamic ("pump") setups. - * Use those "peer connected" events. + * Use those eventual "peer connected" events. 
* * * CHANGELOG: @@ -122,7 +117,7 @@ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages -// #define REALLY_QUEUE +#define REALLY_QUEUE #if !defined (DEBUG) && defined (CONFIG_USB_DEBUG) # define DEBUG @@ -139,7 +134,7 @@ #define CONFIG_USB_PL2301 -#define DRIVER_VERSION "07-May-2002" +#define DRIVER_VERSION "17-Jul-2002" /*-------------------------------------------------------------------------*/ @@ -301,12 +296,12 @@ *-------------------------------------------------------------------------*/ static const struct driver_info an2720_info = { - description: "AnchorChips/Cypress 2720", + .description = "AnchorChips/Cypress 2720", // no reset available! // no check_connect available! - in: 2, out: 2, // direction distinguishes these - epsize: 64, + .in = 2, out: 2, // direction distinguishes these + .epsize =64, }; #endif /* CONFIG_USB_AN2720 */ @@ -324,10 +319,10 @@ *-------------------------------------------------------------------------*/ static const struct driver_info belkin_info = { - description: "Belkin, eTEK, or compatible", + .description = "Belkin, eTEK, or compatible", - in: 1, out: 1, // direction distinguishes these - epsize: 64, + .in = 1, out: 1, // direction distinguishes these + .epsize =64, }; #endif /* CONFIG_USB_BELKIN */ @@ -635,17 +630,17 @@ } static const struct driver_info genelink_info = { - description: "Genesys GeneLink", - flags: FLAG_FRAMING_GL | FLAG_NO_SETINT, - reset: genelink_reset, - rx_fixup: genelink_rx_fixup, - tx_fixup: genelink_tx_fixup, + .description = "Genesys GeneLink", + .flags = FLAG_FRAMING_GL | FLAG_NO_SETINT, + .reset = genelink_reset, + .rx_fixup = genelink_rx_fixup, + .tx_fixup = genelink_tx_fixup, - in: 1, out: 2, - epsize: 64, + .in = 1, out: 2, + .epsize =64, #ifdef GENELINK_ACK - check_connect: genelink_check_connect, + .check_connect =genelink_check_connect, #endif }; @@ -676,11 +671,11 @@ } static const struct driver_info linuxdev_info = { - description: "Linux 
Device", + .description = "Linux Device", // no reset defined (yet?) - check_connect: linuxdev_check_connect, - in: 2, out: 1, - epsize: 64, + .check_connect =linuxdev_check_connect, + .in = 2, out: 1, + .epsize =64, }; #endif /* CONFIG_USB_LINUXDEV */ @@ -1123,15 +1118,15 @@ } static const struct driver_info net1080_info = { - description: "NetChip TurboCONNECT", - flags: FLAG_FRAMING_NC, - reset: net1080_reset, - check_connect: net1080_check_connect, - rx_fixup: net1080_rx_fixup, - tx_fixup: net1080_tx_fixup, + .description = "NetChip TurboCONNECT", + .flags = FLAG_FRAMING_NC, + .reset = net1080_reset, + .check_connect =net1080_check_connect, + .rx_fixup = net1080_rx_fixup, + .tx_fixup = net1080_tx_fixup, - in: 1, out: 1, // direction distinguishes these - epsize: 64, + .in = 1, out: 1, // direction distinguishes these + .epsize =64, }; #endif /* CONFIG_USB_NET1080 */ @@ -1192,13 +1187,13 @@ } static const struct driver_info prolific_info = { - description: "Prolific PL-2301/PL-2302", - flags: FLAG_NO_SETINT, + .description = "Prolific PL-2301/PL-2302", + .flags = FLAG_NO_SETINT, /* some PL-2302 versions seem to fail usb_set_interface() */ - reset: pl_reset, + .reset = pl_reset, - in: 3, out: 2, - epsize: 64, + .in = 3, out: 2, + .epsize =64, }; #endif /* CONFIG_USB_PL2301 */ @@ -1815,20 +1810,19 @@ } #endif /* CONFIG_USB_NET1080 */ - netif_stop_queue (net); switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { case -EPIPE: + netif_stop_queue (net); defer_kevent (dev, EVENT_TX_HALT); break; default: - netif_start_queue (net); dbg ("%s tx: submit urb err %d", net->name, retval); break; case 0: net->trans_start = jiffies; __skb_queue_tail (&dev->txq, skb); - if (dev->txq.qlen < TX_QLEN) - netif_start_queue (net); + if (dev->txq.qlen >= TX_QLEN) + netif_stop_queue (net); } spin_unlock_irqrestore (&dev->txq.lock, flags); @@ -2054,32 +2048,32 @@ #ifdef CONFIG_USB_AN2720 { USB_DEVICE (0x0547, 0x2720), // AnchorChips defaults - driver_info: (unsigned long) 
&an2720_info, + .driver_info = (unsigned long) &an2720_info, }, { USB_DEVICE (0x0547, 0x2727), // Xircom PGUNET - driver_info: (unsigned long) &an2720_info, + .driver_info = (unsigned long) &an2720_info, }, #endif #ifdef CONFIG_USB_BELKIN { USB_DEVICE (0x050d, 0x0004), // Belkin - driver_info: (unsigned long) &belkin_info, + .driver_info = (unsigned long) &belkin_info, }, { USB_DEVICE (0x056c, 0x8100), // eTEK - driver_info: (unsigned long) &belkin_info, + .driver_info = (unsigned long) &belkin_info, }, { USB_DEVICE (0x0525, 0x9901), // Advance USBNET (eTEK) - driver_info: (unsigned long) &belkin_info, + .driver_info = (unsigned long) &belkin_info, }, #endif #ifdef CONFIG_USB_GENESYS { USB_DEVICE (0x05e3, 0x0502), // GL620USB-A - driver_info: (unsigned long) &genelink_info, + .driver_info = (unsigned long) &genelink_info, }, #endif @@ -2091,28 +2085,28 @@ { // 1183 = 0x049F, both used as hex values? USB_DEVICE (0x049F, 0x505A), // Compaq "Itsy" - driver_info: (unsigned long) &linuxdev_info, + .driver_info = (unsigned long) &linuxdev_info, }, #endif #ifdef CONFIG_USB_NET1080 { USB_DEVICE (0x0525, 0x1080), // NetChip ref design - driver_info: (unsigned long) &net1080_info, + .driver_info = (unsigned long) &net1080_info, }, { USB_DEVICE (0x06D0, 0x0622), // Laplink Gold - driver_info: (unsigned long) &net1080_info, + .driver_info = (unsigned long) &net1080_info, }, #endif #ifdef CONFIG_USB_PL2301 { USB_DEVICE (0x067b, 0x0000), // PL-2301 - driver_info: (unsigned long) &prolific_info, + .driver_info = (unsigned long) &prolific_info, }, { USB_DEVICE (0x067b, 0x0001), // PL-2302 - driver_info: (unsigned long) &prolific_info, + .driver_info = (unsigned long) &prolific_info, }, #endif @@ -2123,10 +2117,10 @@ MODULE_DEVICE_TABLE (usb, products); static struct usb_driver usbnet_driver = { - name: driver_name, - id_table: products, - probe: usbnet_probe, - disconnect: usbnet_disconnect, + .name = driver_name, + .id_table = products, + .probe = usbnet_probe, + .disconnect = 
usbnet_disconnect, }; /*-------------------------------------------------------------------------*/ diff -Nru a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c --- a/drivers/usb/serial/belkin_sa.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/belkin_sa.c Sat Jul 20 12:12:35 2002 @@ -116,21 +116,21 @@ /* All of the device info needed for the serial converters */ static struct usb_serial_device_type belkin_device = { - owner: THIS_MODULE, - name: "Belkin / Peracom / GoHubs USB Serial Adapter", - id_table: id_table_combined, - num_interrupt_in: 1, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: belkin_sa_open, - close: belkin_sa_close, - read_int_callback: belkin_sa_read_int_callback, /* How we get the status info */ - ioctl: belkin_sa_ioctl, - set_termios: belkin_sa_set_termios, - break_ctl: belkin_sa_break_ctl, - attach: belkin_sa_startup, - shutdown: belkin_sa_shutdown, + .owner = THIS_MODULE, + .name = "Belkin / Peracom / GoHubs USB Serial Adapter", + .id_table = id_table_combined, + .num_interrupt_in = 1, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = belkin_sa_open, + .close = belkin_sa_close, + .read_int_callback = belkin_sa_read_int_callback, /* How we get the status info */ + .ioctl = belkin_sa_ioctl, + .set_termios = belkin_sa_set_termios, + .break_ctl = belkin_sa_break_ctl, + .attach = belkin_sa_startup, + .shutdown = belkin_sa_shutdown, }; diff -Nru a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c --- a/drivers/usb/serial/cyberjack.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/cyberjack.c Sat Jul 20 12:12:35 2002 @@ -74,21 +74,21 @@ MODULE_DEVICE_TABLE (usb, id_table); static struct usb_serial_device_type cyberjack_device = { - owner: THIS_MODULE, - name: "Reiner SCT Cyberjack USB card reader", - id_table: id_table, - num_interrupt_in: 1, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - attach: cyberjack_startup, - shutdown: cyberjack_shutdown, - open: cyberjack_open, - 
close: cyberjack_close, - write: cyberjack_write, - read_int_callback: cyberjack_read_int_callback, - read_bulk_callback: cyberjack_read_bulk_callback, - write_bulk_callback: cyberjack_write_bulk_callback, + .owner = THIS_MODULE, + .name = "Reiner SCT Cyberjack USB card reader", + .id_table = id_table, + .num_interrupt_in = 1, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .attach = cyberjack_startup, + .shutdown = cyberjack_shutdown, + .open = cyberjack_open, + .close = cyberjack_close, + .write = cyberjack_write, + .read_int_callback = cyberjack_read_int_callback, + .read_bulk_callback = cyberjack_read_bulk_callback, + .write_bulk_callback = cyberjack_write_bulk_callback, }; struct cyberjack_private { diff -Nru a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c --- a/drivers/usb/serial/digi_acceleport.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/digi_acceleport.c Sat Jul 20 12:12:35 2002 @@ -498,51 +498,51 @@ /* device info needed for the Digi serial converter */ static struct usb_serial_device_type digi_acceleport_2_device = { - owner: THIS_MODULE, - name: "Digi USB", - id_table: id_table_2, - num_interrupt_in: 0, - num_bulk_in: 4, - num_bulk_out: 4, - num_ports: 3, - open: digi_open, - close: digi_close, - write: digi_write, - write_room: digi_write_room, - write_bulk_callback: digi_write_bulk_callback, - read_bulk_callback: digi_read_bulk_callback, - chars_in_buffer: digi_chars_in_buffer, - throttle: digi_rx_throttle, - unthrottle: digi_rx_unthrottle, - ioctl: digi_ioctl, - set_termios: digi_set_termios, - break_ctl: digi_break_ctl, - attach: digi_startup, - shutdown: digi_shutdown, + .owner = THIS_MODULE, + .name = "Digi USB", + .id_table = id_table_2, + .num_interrupt_in = 0, + .num_bulk_in = 4, + .num_bulk_out = 4, + .num_ports = 3, + .open = digi_open, + .close = digi_close, + .write = digi_write, + .write_room = digi_write_room, + .write_bulk_callback = digi_write_bulk_callback, + .read_bulk_callback = 
digi_read_bulk_callback, + .chars_in_buffer = digi_chars_in_buffer, + .throttle = digi_rx_throttle, + .unthrottle = digi_rx_unthrottle, + .ioctl = digi_ioctl, + .set_termios = digi_set_termios, + .break_ctl = digi_break_ctl, + .attach = digi_startup, + .shutdown = digi_shutdown, }; static struct usb_serial_device_type digi_acceleport_4_device = { - owner: THIS_MODULE, - name: "Digi USB", - id_table: id_table_4, - num_interrupt_in: 0, - num_bulk_in: 5, - num_bulk_out: 5, - num_ports: 4, - open: digi_open, - close: digi_close, - write: digi_write, - write_room: digi_write_room, - write_bulk_callback: digi_write_bulk_callback, - read_bulk_callback: digi_read_bulk_callback, - chars_in_buffer: digi_chars_in_buffer, - throttle: digi_rx_throttle, - unthrottle: digi_rx_unthrottle, - ioctl: digi_ioctl, - set_termios: digi_set_termios, - break_ctl: digi_break_ctl, - attach: digi_startup, - shutdown: digi_shutdown, + .owner = THIS_MODULE, + .name = "Digi USB", + .id_table = id_table_4, + .num_interrupt_in = 0, + .num_bulk_in = 5, + .num_bulk_out = 5, + .num_ports = 4, + .open = digi_open, + .close = digi_close, + .write = digi_write, + .write_room = digi_write_room, + .write_bulk_callback = digi_write_bulk_callback, + .read_bulk_callback = digi_read_bulk_callback, + .chars_in_buffer = digi_chars_in_buffer, + .throttle = digi_rx_throttle, + .unthrottle = digi_rx_unthrottle, + .ioctl = digi_ioctl, + .set_termios = digi_set_termios, + .break_ctl = digi_break_ctl, + .attach = digi_startup, + .shutdown = digi_shutdown, }; diff -Nru a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c --- a/drivers/usb/serial/empeg.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/empeg.c Sat Jul 20 12:12:35 2002 @@ -111,26 +111,26 @@ MODULE_DEVICE_TABLE (usb, id_table); static struct usb_serial_device_type empeg_device = { - owner: THIS_MODULE, - name: "Empeg", - id_table: id_table, - num_interrupt_in: 0, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: empeg_open, - close: 
empeg_close, - throttle: empeg_throttle, - unthrottle: empeg_unthrottle, - attach: empeg_startup, - shutdown: empeg_shutdown, - ioctl: empeg_ioctl, - set_termios: empeg_set_termios, - write: empeg_write, - write_room: empeg_write_room, - chars_in_buffer: empeg_chars_in_buffer, - write_bulk_callback: empeg_write_bulk_callback, - read_bulk_callback: empeg_read_bulk_callback, + .owner = THIS_MODULE, + .name = "Empeg", + .id_table = id_table, + .num_interrupt_in = 0, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = empeg_open, + .close = empeg_close, + .throttle = empeg_throttle, + .unthrottle = empeg_unthrottle, + .attach = empeg_startup, + .shutdown = empeg_shutdown, + .ioctl = empeg_ioctl, + .set_termios = empeg_set_termios, + .write = empeg_write, + .write_room = empeg_write_room, + .chars_in_buffer = empeg_chars_in_buffer, + .write_bulk_callback = empeg_write_bulk_callback, + .read_bulk_callback = empeg_read_bulk_callback, }; #define NUM_URBS 16 diff -Nru a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c --- a/drivers/usb/serial/ftdi_sio.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/ftdi_sio.c Sat Jul 20 12:12:35 2002 @@ -173,45 +173,45 @@ which share common code */ static struct usb_serial_device_type ftdi_sio_device = { - owner: THIS_MODULE, - name: "FTDI SIO", - id_table: id_table_sio, - num_interrupt_in: 0, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: ftdi_sio_open, - close: ftdi_sio_close, - write: ftdi_sio_write, - write_room: ftdi_sio_write_room, - read_bulk_callback: ftdi_sio_read_bulk_callback, - write_bulk_callback: ftdi_sio_write_bulk_callback, - ioctl: ftdi_sio_ioctl, - set_termios: ftdi_sio_set_termios, - break_ctl: ftdi_sio_break_ctl, - attach: ftdi_sio_startup, - shutdown: ftdi_sio_shutdown, + .owner = THIS_MODULE, + .name = "FTDI SIO", + .id_table = id_table_sio, + .num_interrupt_in = 0, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = ftdi_sio_open, + .close = 
ftdi_sio_close, + .write = ftdi_sio_write, + .write_room = ftdi_sio_write_room, + .read_bulk_callback = ftdi_sio_read_bulk_callback, + .write_bulk_callback = ftdi_sio_write_bulk_callback, + .ioctl = ftdi_sio_ioctl, + .set_termios = ftdi_sio_set_termios, + .break_ctl = ftdi_sio_break_ctl, + .attach = ftdi_sio_startup, + .shutdown = ftdi_sio_shutdown, }; static struct usb_serial_device_type ftdi_8U232AM_device = { - owner: THIS_MODULE, - name: "FTDI 8U232AM", - id_table: id_table_8U232AM, - num_interrupt_in: 0, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: ftdi_sio_open, - close: ftdi_sio_close, - write: ftdi_sio_write, - write_room: ftdi_sio_write_room, - read_bulk_callback: ftdi_sio_read_bulk_callback, - write_bulk_callback: ftdi_sio_write_bulk_callback, - ioctl: ftdi_sio_ioctl, - set_termios: ftdi_sio_set_termios, - break_ctl: ftdi_sio_break_ctl, - attach: ftdi_8U232AM_startup, - shutdown: ftdi_sio_shutdown, + .owner = THIS_MODULE, + .name = "FTDI 8U232AM", + .id_table = id_table_8U232AM, + .num_interrupt_in = 0, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = ftdi_sio_open, + .close = ftdi_sio_close, + .write = ftdi_sio_write, + .write_room = ftdi_sio_write_room, + .read_bulk_callback = ftdi_sio_read_bulk_callback, + .write_bulk_callback = ftdi_sio_write_bulk_callback, + .ioctl = ftdi_sio_ioctl, + .set_termios = ftdi_sio_set_termios, + .break_ctl = ftdi_sio_break_ctl, + .attach = ftdi_8U232AM_startup, + .shutdown = ftdi_sio_shutdown, }; diff -Nru a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c --- a/drivers/usb/serial/ipaq.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/ipaq.c Sat Jul 20 12:12:35 2002 @@ -90,22 +90,22 @@ /* All of the device info needed for the Compaq iPAQ */ struct usb_serial_device_type ipaq_device = { - owner: THIS_MODULE, - name: "Compaq iPAQ", - id_table: ipaq_id_table, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: ipaq_open, - close: 
ipaq_close, - attach: ipaq_startup, - shutdown: ipaq_shutdown, - write: ipaq_write, - write_room: ipaq_write_room, - chars_in_buffer: ipaq_chars_in_buffer, - read_bulk_callback: ipaq_read_bulk_callback, - write_bulk_callback: ipaq_write_bulk_callback, + .owner = THIS_MODULE, + .name = "Compaq iPAQ", + .id_table = ipaq_id_table, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = ipaq_open, + .close = ipaq_close, + .attach = ipaq_startup, + .shutdown = ipaq_shutdown, + .write = ipaq_write, + .write_room = ipaq_write_room, + .chars_in_buffer = ipaq_chars_in_buffer, + .read_bulk_callback = ipaq_read_bulk_callback, + .write_bulk_callback = ipaq_write_bulk_callback, }; static spinlock_t write_list_lock; diff -Nru a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c --- a/drivers/usb/serial/ir-usb.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/ir-usb.c Sat Jul 20 12:12:35 2002 @@ -131,20 +131,20 @@ struct usb_serial_device_type ir_device = { - owner: THIS_MODULE, - name: "IR Dongle", - id_table: id_table, - num_interrupt_in: 1, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - set_termios: ir_set_termios, - attach: ir_startup, - open: ir_open, - close: ir_close, - write: ir_write, - write_bulk_callback: ir_write_bulk_callback, - read_bulk_callback: ir_read_bulk_callback, + .owner = THIS_MODULE, + .name = "IR Dongle", + .id_table = id_table, + .num_interrupt_in = 1, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .set_termios = ir_set_termios, + .attach = ir_startup, + .open = ir_open, + .close = ir_close, + .write = ir_write, + .write_bulk_callback = ir_write_bulk_callback, + .read_bulk_callback = ir_read_bulk_callback, }; static inline void irda_usb_dump_class_desc(struct irda_class_desc *desc) diff -Nru a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c --- a/drivers/usb/serial/keyspan.c Sat Jul 20 12:12:34 2002 +++ b/drivers/usb/serial/keyspan.c Sat Jul 20 12:12:34 2002 @@ 
-1103,28 +1103,28 @@ } keyspan_callbacks[] = { { /* msg_usa26 callbacks */ - instat_callback: usa26_instat_callback, - glocont_callback: usa26_glocont_callback, - indat_callback: usa26_indat_callback, - outdat_callback: usa2x_outdat_callback, - inack_callback: usa26_inack_callback, - outcont_callback: usa26_outcont_callback, + .instat_callback = usa26_instat_callback, + .glocont_callback = usa26_glocont_callback, + .indat_callback = usa26_indat_callback, + .outdat_callback = usa2x_outdat_callback, + .inack_callback = usa26_inack_callback, + .outcont_callback = usa26_outcont_callback, }, { /* msg_usa28 callbacks */ - instat_callback: usa28_instat_callback, - glocont_callback: usa28_glocont_callback, - indat_callback: usa28_indat_callback, - outdat_callback: usa2x_outdat_callback, - inack_callback: usa28_inack_callback, - outcont_callback: usa28_outcont_callback, + .instat_callback = usa28_instat_callback, + .glocont_callback = usa28_glocont_callback, + .indat_callback = usa28_indat_callback, + .outdat_callback = usa2x_outdat_callback, + .inack_callback = usa28_inack_callback, + .outcont_callback = usa28_outcont_callback, }, { /* msg_usa49 callbacks */ - instat_callback: usa49_instat_callback, - glocont_callback: usa49_glocont_callback, - indat_callback: usa49_indat_callback, - outdat_callback: usa2x_outdat_callback, - inack_callback: usa49_inack_callback, - outcont_callback: usa49_outcont_callback, + .instat_callback = usa49_instat_callback, + .glocont_callback = usa49_glocont_callback, + .indat_callback = usa49_indat_callback, + .outdat_callback = usa2x_outdat_callback, + .inack_callback = usa49_inack_callback, + .outcont_callback = usa49_outcont_callback, } }; diff -Nru a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c --- a/drivers/usb/serial/keyspan_pda.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/keyspan_pda.c Sat Jul 20 12:12:35 2002 @@ -804,52 +804,52 @@ #ifdef KEYSPAN static struct usb_serial_device_type keyspan_pda_fake_device = { 
- owner: THIS_MODULE, - name: "Keyspan PDA - (prerenumeration)", - id_table: id_table_fake, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 1, - attach: keyspan_pda_fake_startup, + .owner = THIS_MODULE, + .name = "Keyspan PDA - (prerenumeration)", + .id_table = id_table_fake, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, + .attach = keyspan_pda_fake_startup, }; #endif #ifdef XIRCOM static struct usb_serial_device_type xircom_pgs_fake_device = { - owner: THIS_MODULE, - name: "Xircom / Entregra PGS - (prerenumeration)", - id_table: id_table_fake_xircom, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 1, - attach: keyspan_pda_fake_startup, + .owner = THIS_MODULE, + .name = "Xircom / Entregra PGS - (prerenumeration)", + .id_table = id_table_fake_xircom, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, + .attach = keyspan_pda_fake_startup, }; #endif static struct usb_serial_device_type keyspan_pda_device = { - owner: THIS_MODULE, - name: "Keyspan PDA", - id_table: id_table_std, - num_interrupt_in: 1, - num_bulk_in: 0, - num_bulk_out: 1, - num_ports: 1, - open: keyspan_pda_open, - close: keyspan_pda_close, - write: keyspan_pda_write, - write_room: keyspan_pda_write_room, - write_bulk_callback: keyspan_pda_write_bulk_callback, - read_int_callback: keyspan_pda_rx_interrupt, - chars_in_buffer: keyspan_pda_chars_in_buffer, - throttle: keyspan_pda_rx_throttle, - unthrottle: keyspan_pda_rx_unthrottle, - ioctl: keyspan_pda_ioctl, - set_termios: keyspan_pda_set_termios, - break_ctl: keyspan_pda_break_ctl, - attach: keyspan_pda_startup, - shutdown: keyspan_pda_shutdown, + .owner = THIS_MODULE, + .name = "Keyspan PDA", + .id_table = id_table_std, + .num_interrupt_in = 1, + .num_bulk_in = 0, + .num_bulk_out = 1, + 
.num_ports = 1, + .open = keyspan_pda_open, + .close = keyspan_pda_close, + .write = keyspan_pda_write, + .write_room = keyspan_pda_write_room, + .write_bulk_callback = keyspan_pda_write_bulk_callback, + .read_int_callback = keyspan_pda_rx_interrupt, + .chars_in_buffer = keyspan_pda_chars_in_buffer, + .throttle = keyspan_pda_rx_throttle, + .unthrottle = keyspan_pda_rx_unthrottle, + .ioctl = keyspan_pda_ioctl, + .set_termios = keyspan_pda_set_termios, + .break_ctl = keyspan_pda_break_ctl, + .attach = keyspan_pda_startup, + .shutdown = keyspan_pda_shutdown, }; diff -Nru a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c --- a/drivers/usb/serial/kl5kusb105.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/kl5kusb105.c Sat Jul 20 12:12:35 2002 @@ -119,27 +119,27 @@ static struct usb_serial_device_type kl5kusb105d_device = { - owner: THIS_MODULE, - name: "KL5KUSB105D / PalmConnect", - id_table: id_table, - num_interrupt_in: 1, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: klsi_105_open, - close: klsi_105_close, - write: klsi_105_write, - write_bulk_callback: klsi_105_write_bulk_callback, - chars_in_buffer: klsi_105_chars_in_buffer, - write_room: klsi_105_write_room, - read_bulk_callback: klsi_105_read_bulk_callback, - ioctl: klsi_105_ioctl, - set_termios: klsi_105_set_termios, + .owner = THIS_MODULE, + .name = "KL5KUSB105D / PalmConnect", + .id_table = id_table, + .num_interrupt_in = 1, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = klsi_105_open, + .close = klsi_105_close, + .write = klsi_105_write, + .write_bulk_callback = klsi_105_write_bulk_callback, + .chars_in_buffer = klsi_105_chars_in_buffer, + .write_room = klsi_105_write_room, + .read_bulk_callback =klsi_105_read_bulk_callback, + .ioctl = klsi_105_ioctl, + .set_termios = klsi_105_set_termios, /*break_ctl: klsi_105_break_ctl,*/ - attach: klsi_105_startup, - shutdown: klsi_105_shutdown, - throttle: klsi_105_throttle, - unthrottle: klsi_105_unthrottle, + 
.attach = klsi_105_startup, + .shutdown = klsi_105_shutdown, + .throttle = klsi_105_throttle, + .unthrottle = klsi_105_unthrottle, }; struct klsi_105_port_settings { diff -Nru a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c --- a/drivers/usb/serial/mct_u232.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/mct_u232.c Sat Jul 20 12:12:35 2002 @@ -141,25 +141,25 @@ static struct usb_serial_device_type mct_u232_device = { - owner: THIS_MODULE, - name: "Magic Control Technology USB-RS232", - id_table: id_table_combined, - num_interrupt_in: 2, - num_bulk_in: 0, - num_bulk_out: 1, - num_ports: 1, - open: mct_u232_open, - close: mct_u232_close, + .owner = THIS_MODULE, + .name = "Magic Control Technology USB-RS232", + .id_table = id_table_combined, + .num_interrupt_in = 2, + .num_bulk_in = 0, + .num_bulk_out = 1, + .num_ports = 1, + .open = mct_u232_open, + .close = mct_u232_close, #ifdef FIX_WRITE_RETURN_CODE_PROBLEM - write: mct_u232_write, - write_bulk_callback: mct_u232_write_bulk_callback, + .write = mct_u232_write, + .write_bulk_callback = mct_u232_write_bulk_callback, #endif - read_int_callback: mct_u232_read_int_callback, - ioctl: mct_u232_ioctl, - set_termios: mct_u232_set_termios, - break_ctl: mct_u232_break_ctl, - attach: mct_u232_startup, - shutdown: mct_u232_shutdown, + .read_int_callback = mct_u232_read_int_callback, + .ioctl = mct_u232_ioctl, + .set_termios = mct_u232_set_termios, + .break_ctl = mct_u232_break_ctl, + .attach = mct_u232_startup, + .shutdown = mct_u232_shutdown, }; diff -Nru a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c --- a/drivers/usb/serial/omninet.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/omninet.c Sat Jul 20 12:12:35 2002 @@ -85,20 +85,20 @@ static struct usb_serial_device_type zyxel_omninet_device = { - owner: THIS_MODULE, - name: "ZyXEL - omni.net lcd plus usb", - id_table: id_table, - num_interrupt_in: 1, - num_bulk_in: 1, - num_bulk_out: 2, - num_ports: 1, - open: omninet_open, - close: 
omninet_close, - write: omninet_write, - write_room: omninet_write_room, - read_bulk_callback: omninet_read_bulk_callback, - write_bulk_callback: omninet_write_bulk_callback, - shutdown: omninet_shutdown, + .owner = THIS_MODULE, + .name = "ZyXEL - omni.net lcd plus usb", + .id_table = id_table, + .num_interrupt_in = 1, + .num_bulk_in = 1, + .num_bulk_out = 2, + .num_ports = 1, + .open = omninet_open, + .close = omninet_close, + .write = omninet_write, + .write_room = omninet_write_room, + .read_bulk_callback = omninet_read_bulk_callback, + .write_bulk_callback = omninet_write_bulk_callback, + .shutdown = omninet_shutdown, }; diff -Nru a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c --- a/drivers/usb/serial/pl2303.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/pl2303.c Sat Jul 20 12:12:35 2002 @@ -118,24 +118,24 @@ /* All of the device info needed for the PL2303 SIO serial converter */ static struct usb_serial_device_type pl2303_device = { - owner: THIS_MODULE, - name: "PL-2303", - id_table: id_table, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: pl2303_open, - close: pl2303_close, - write: pl2303_write, - ioctl: pl2303_ioctl, - break_ctl: pl2303_break_ctl, - set_termios: pl2303_set_termios, - read_bulk_callback: pl2303_read_bulk_callback, - read_int_callback: pl2303_read_int_callback, - write_bulk_callback: pl2303_write_bulk_callback, - attach: pl2303_startup, - shutdown: pl2303_shutdown, + .owner = THIS_MODULE, + .name = "PL-2303", + .id_table = id_table, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = pl2303_open, + .close = pl2303_close, + .write = pl2303_write, + .ioctl = pl2303_ioctl, + .break_ctl = pl2303_break_ctl, + .set_termios = pl2303_set_termios, + .read_bulk_callback = pl2303_read_bulk_callback, + .read_int_callback = pl2303_read_int_callback, + .write_bulk_callback = pl2303_write_bulk_callback, + .attach = pl2303_startup, + 
.shutdown = pl2303_shutdown, }; struct pl2303_private { diff -Nru a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c --- a/drivers/usb/serial/safe_serial.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/safe_serial.c Sat Jul 20 12:12:35 2002 @@ -399,17 +399,17 @@ } static struct usb_serial_device_type safe_device = { - owner: THIS_MODULE, - name: "Safe", - id_table: id_table, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 1, - write: safe_write, - write_room: safe_write_room, - read_bulk_callback: safe_read_bulk_callback, - attach: safe_startup, + .owner = THIS_MODULE, + .name = "Safe", + .id_table = id_table, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, + .write = safe_write, + .write_room = safe_write_room, + .read_bulk_callback = safe_read_bulk_callback, + .attach = safe_startup, }; static int __init safe_init (void) diff -Nru a/drivers/usb/serial/usbserial.c b/drivers/usb/serial/usbserial.c --- a/drivers/usb/serial/usbserial.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/usbserial.c Sat Jul 20 12:12:35 2002 @@ -366,14 +366,14 @@ /* All of the device info needed for the Generic Serial Converter */ static struct usb_serial_device_type generic_device = { - owner: THIS_MODULE, - name: "Generic", - id_table: generic_device_ids, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 1, - shutdown: generic_shutdown, + .owner = THIS_MODULE, + .name = "Generic", + .id_table = generic_device_ids, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, + .shutdown = generic_shutdown, }; #endif @@ -395,10 +395,10 @@ static void usb_serial_disconnect(struct usb_device *dev, void *ptr); static struct usb_driver usb_serial_driver = { - name: "serial", - probe: usb_serial_probe, - disconnect: 
usb_serial_disconnect, - id_table: NULL, /* check all devices */ + .name = "serial", + .probe = usb_serial_probe, + .disconnect = usb_serial_disconnect, + .id_table = NULL, /* check all devices */ }; /* There is no MODULE_DEVICE_TABLE for usbserial.c. Instead @@ -447,24 +447,18 @@ good_spot = 1; for (j = 1; j <= num_ports-1; ++j) - if (serial_table[i+j]) + if ((serial_table[i+j]) || (i+j >= SERIAL_TTY_MINORS)) { good_spot = 0; + i += j; + break; + } if (good_spot == 0) continue; - if (!serial) { - serial = kmalloc(sizeof(*serial), GFP_KERNEL); - if (!serial) { - err(__FUNCTION__ " - Out of memory"); - return NULL; - } - memset(serial, 0, sizeof(*serial)); - } serial->magic = USB_SERIAL_MAGIC; - serial_table[i] = serial; *minor = i; dbg(__FUNCTION__ " - minor base = %d", *minor); - for (i = *minor+1; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) + for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) serial_table[i] = serial; return serial; } @@ -1207,14 +1201,14 @@ return(NULL); } + serial = create_serial (dev, interface, type); + if (!serial) { + err ("%s - out of memory", __FUNCTION__); + return NULL; + } + /* if this device type has a probe function, call it */ if (type->probe) { - serial = create_serial (dev, interface, type); - if (!serial) { - err ("%s - out of memory", __FUNCTION__); - return NULL; - } - if (type->owner) __MOD_INC_USE_COUNT(type->owner); retval = type->probe (serial); @@ -1293,6 +1287,7 @@ num_ports = num_bulk_out; if (num_ports == 0) { err("Generic device with no bulk out, not allowed."); + kfree (serial); return NULL; } } @@ -1300,14 +1295,6 @@ if (!num_ports) { /* if this device type has a calc_num_ports function, call it */ if (type->calc_num_ports) { - if (!serial) { - serial = create_serial (dev, interface, type); - if (!serial) { - err ("%s - out of memory", __FUNCTION__); - return NULL; - } - } - if (type->owner) __MOD_INC_USE_COUNT(type->owner); num_ports = type->calc_num_ports (serial); @@ 
-1318,22 +1305,17 @@ num_ports = type->num_ports; } - serial = get_free_serial (serial, num_ports, &minor); - if (serial == NULL) { + if (get_free_serial (serial, num_ports, &minor) == NULL) { err("No more free serial devices"); + kfree (serial); return NULL; } - serial->dev = dev; - serial->type = type; - serial->interface = interface; serial->minor = minor; serial->num_ports = num_ports; serial->num_bulk_in = num_bulk_in; serial->num_bulk_out = num_bulk_out; serial->num_interrupt_in = num_interrupt_in; - serial->vendor = dev->descriptor.idVendor; - serial->product = dev->descriptor.idProduct; /* set up the endpoint information */ for (i = 0; i < num_bulk_in; ++i) { @@ -1577,32 +1559,32 @@ static struct tty_driver serial_tty_driver = { - magic: TTY_DRIVER_MAGIC, - driver_name: "usb-serial", - name: "usb/tts/%d", - major: SERIAL_TTY_MAJOR, - minor_start: 0, - num: SERIAL_TTY_MINORS, - type: TTY_DRIVER_TYPE_SERIAL, - subtype: SERIAL_TYPE_NORMAL, - flags: TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, - - refcount: &serial_refcount, - table: serial_tty, - termios: serial_termios, - termios_locked: serial_termios_locked, - - open: serial_open, - close: serial_close, - write: serial_write, - write_room: serial_write_room, - ioctl: serial_ioctl, - set_termios: serial_set_termios, - throttle: serial_throttle, - unthrottle: serial_unthrottle, - break_ctl: serial_break, - chars_in_buffer: serial_chars_in_buffer, - read_proc: serial_read_proc, + .magic = TTY_DRIVER_MAGIC, + .driver_name = "usb-serial", + .name = "usb/tts/%d", + .major = SERIAL_TTY_MAJOR, + .minor_start = 0, + .num = SERIAL_TTY_MINORS, + .type = TTY_DRIVER_TYPE_SERIAL, + .subtype = SERIAL_TYPE_NORMAL, + .flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS, + + .refcount = &serial_refcount, + .table = serial_tty, + .termios = serial_termios, + .termios_locked = serial_termios_locked, + + .open = serial_open, + .close = serial_close, + .write = serial_write, + .write_room = serial_write_room, + .ioctl = 
serial_ioctl, + .set_termios = serial_set_termios, + .throttle = serial_throttle, + .unthrottle = serial_unthrottle, + .break_ctl = serial_break, + .chars_in_buffer = serial_chars_in_buffer, + .read_proc = serial_read_proc, }; @@ -1931,15 +1913,15 @@ #endif static struct console usbcons = { - name: "ttyUSB", /* only [8] */ - write: usb_console_write, + .name = "ttyUSB", /* only [8] */ + .write = usb_console_write, #if 0 - device: usb_console_device, /* TBD */ - wait_key: usb_console_wait_key, /* TBD */ + .device = usb_console_device, /* TBD */ + .wait_key = usb_console_wait_key, /* TBD */ #endif - setup: usb_console_setup, - flags: CON_PRINTBUFFER, - index: -1, + .setup = usb_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, }; #endif /* CONFIG_USB_SERIAL_CONSOLE */ diff -Nru a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c --- a/drivers/usb/serial/visor.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/visor.c Sat Jul 20 12:12:35 2002 @@ -218,50 +218,50 @@ /* All of the device info needed for the Handspring Visor, and Palm 4.0 devices */ static struct usb_serial_device_type handspring_device = { - owner: THIS_MODULE, - name: "Handspring Visor / Palm 4.0 / Clié 4.x", - id_table: id_table, - num_interrupt_in: 0, - num_bulk_in: 2, - num_bulk_out: 2, - num_ports: 2, - open: visor_open, - close: visor_close, - throttle: visor_throttle, - unthrottle: visor_unthrottle, - probe: visor_probe, - calc_num_ports: visor_calc_num_ports, - shutdown: visor_shutdown, - ioctl: visor_ioctl, - set_termios: visor_set_termios, - write: visor_write, - write_room: visor_write_room, - chars_in_buffer: visor_chars_in_buffer, - write_bulk_callback: visor_write_bulk_callback, - read_bulk_callback: visor_read_bulk_callback, + .owner = THIS_MODULE, + .name = "Handspring Visor / Palm 4.0 / Clié 4.x", + .id_table = id_table, + .num_interrupt_in = 0, + .num_bulk_in = 2, + .num_bulk_out = 2, + .num_ports = 2, + .open = visor_open, + .close = visor_close, + .throttle = 
visor_throttle, + .unthrottle = visor_unthrottle, + .probe = visor_probe, + .calc_num_ports = visor_calc_num_ports, + .shutdown = visor_shutdown, + .ioctl = visor_ioctl, + .set_termios = visor_set_termios, + .write = visor_write, + .write_room = visor_write_room, + .chars_in_buffer = visor_chars_in_buffer, + .write_bulk_callback = visor_write_bulk_callback, + .read_bulk_callback = visor_read_bulk_callback, }; /* device info for the Sony Clie OS version 3.5 */ static struct usb_serial_device_type clie_3_5_device = { - owner: THIS_MODULE, - name: "Sony Clié 3.5", - id_table: clie_id_3_5_table, - num_interrupt_in: 0, - num_bulk_in: 1, - num_bulk_out: 1, - num_ports: 1, - open: visor_open, - close: visor_close, - throttle: visor_throttle, - unthrottle: visor_unthrottle, - attach: clie_3_5_startup, - ioctl: visor_ioctl, - set_termios: visor_set_termios, - write: visor_write, - write_room: visor_write_room, - chars_in_buffer: visor_chars_in_buffer, - write_bulk_callback: visor_write_bulk_callback, - read_bulk_callback: visor_read_bulk_callback, + .owner = THIS_MODULE, + .name = "Sony Clié 3.5", + .id_table = clie_id_3_5_table, + .num_interrupt_in = 0, + .num_bulk_in = 1, + .num_bulk_out = 1, + .num_ports = 1, + .open = visor_open, + .close = visor_close, + .throttle = visor_throttle, + .unthrottle = visor_unthrottle, + .attach = clie_3_5_startup, + .ioctl = visor_ioctl, + .set_termios = visor_set_termios, + .write = visor_write, + .write_room = visor_write_room, + .chars_in_buffer = visor_chars_in_buffer, + .write_bulk_callback = visor_write_bulk_callback, + .read_bulk_callback = visor_read_bulk_callback, }; diff -Nru a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c --- a/drivers/usb/serial/whiteheat.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/serial/whiteheat.c Sat Jul 20 12:12:35 2002 @@ -130,32 +130,32 @@ static void whiteheat_shutdown (struct usb_serial *serial); static struct usb_serial_device_type whiteheat_fake_device = { - owner: THIS_MODULE, - 
name: "Connect Tech - WhiteHEAT - (prerenumeration)", - id_table: id_table_prerenumeration, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 1, - probe: whiteheat_firmware_download, + .owner = THIS_MODULE, + .name = "Connect Tech - WhiteHEAT - (prerenumeration)", + .id_table = id_table_prerenumeration, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 1, + .probe = whiteheat_firmware_download, }; static struct usb_serial_device_type whiteheat_device = { - owner: THIS_MODULE, - name: "Connect Tech - WhiteHEAT", - id_table: id_table_std, - num_interrupt_in: NUM_DONT_CARE, - num_bulk_in: NUM_DONT_CARE, - num_bulk_out: NUM_DONT_CARE, - num_ports: 4, - open: whiteheat_open, - close: whiteheat_close, - throttle: whiteheat_throttle, - unthrottle: whiteheat_unthrottle, - ioctl: whiteheat_ioctl, - set_termios: whiteheat_set_termios, - attach: whiteheat_attach, - shutdown: whiteheat_shutdown, + .owner = THIS_MODULE, + .name = "Connect Tech - WhiteHEAT", + .id_table = id_table_std, + .num_interrupt_in = NUM_DONT_CARE, + .num_bulk_in = NUM_DONT_CARE, + .num_bulk_out = NUM_DONT_CARE, + .num_ports = 4, + .open = whiteheat_open, + .close = whiteheat_close, + .throttle = whiteheat_throttle, + .unthrottle = whiteheat_unthrottle, + .ioctl = whiteheat_ioctl, + .set_termios = whiteheat_set_termios, + .attach = whiteheat_attach, + .shutdown = whiteheat_shutdown, }; struct whiteheat_private { diff -Nru a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c --- a/drivers/usb/storage/scsiglue.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/storage/scsiglue.c Sat Jul 20 12:12:35 2002 @@ -354,29 +354,29 @@ */ Scsi_Host_Template usb_stor_host_template = { - name: "usb-storage", - proc_info: proc_info, - info: host_info, + .name = "usb-storage", + .proc_info = proc_info, + .info = host_info, - detect: detect, - release: release, - command: command, - 
queuecommand: queuecommand, + .detect = detect, + .release = release, + .command = command, + .queuecommand = queuecommand, - eh_abort_handler: command_abort, - eh_device_reset_handler:device_reset, - eh_bus_reset_handler: bus_reset, - eh_host_reset_handler: host_reset, + .eh_abort_handler = command_abort, + .eh_device_reset_handler =device_reset, + .eh_bus_reset_handler = bus_reset, + .eh_host_reset_handler =host_reset, - can_queue: 1, - this_id: -1, + .can_queue = 1, + .this_id = -1, - sg_tablesize: SG_ALL, - cmd_per_lun: 1, - present: 0, - unchecked_isa_dma: FALSE, - use_clustering: TRUE, - emulated: TRUE + .sg_tablesize = SG_ALL, + .cmd_per_lun = 1, + .present = 0, + .unchecked_isa_dma = FALSE, + .use_clustering = TRUE, + .emulated = TRUE }; unsigned char usb_stor_sense_notready[18] = { diff -Nru a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c --- a/drivers/usb/storage/usb.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/storage/usb.c Sat Jul 20 12:12:35 2002 @@ -170,12 +170,12 @@ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ - vendorName: vendor_name, \ - productName: product_name, \ - useProtocol: use_protocol, \ - useTransport: use_transport, \ + .vendorName = vendor_name, \ + .productName = product_name, \ + .useProtocol = use_protocol, \ + .useTransport = use_transport, \ initFunction : init_function, \ - flags: Flags, \ + .flags = Flags, \ } static struct us_unusual_dev us_unusual_dev_list[] = { @@ -228,10 +228,10 @@ }; struct usb_driver usb_storage_driver = { - name: "usb-storage", - probe: storage_probe, - disconnect: storage_disconnect, - id_table: storage_usb_ids, + .name = "usb-storage", + .probe = storage_probe, + .disconnect = storage_disconnect, + .id_table = storage_usb_ids, }; /* diff -Nru a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c --- a/drivers/usb/usb-skeleton.c Sat Jul 20 12:12:35 2002 +++ b/drivers/usb/usb-skeleton.c Sat Jul 20 12:12:35 2002 @@ -174,22 +174,22 @@ * and decrement it 
again in the release() function * yourself. */ - owner: THIS_MODULE, + .owner = THIS_MODULE, - read: skel_read, - write: skel_write, - ioctl: skel_ioctl, - open: skel_open, - release: skel_release, + .read = skel_read, + .write = skel_write, + .ioctl = skel_ioctl, + .open = skel_open, + .release = skel_release, }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver skel_driver = { - name: "skeleton", - probe: skel_probe, - disconnect: skel_disconnect, - id_table: skel_table, + .name = "skeleton", + .probe = skel_probe, + .disconnect = skel_disconnect, + .id_table = skel_table, }; diff -Nru a/fs/block_dev.c b/fs/block_dev.c --- a/fs/block_dev.c Sat Jul 20 12:12:35 2002 +++ b/fs/block_dev.c Sat Jul 20 12:12:35 2002 @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -195,6 +196,7 @@ }; static struct vfsmount *bd_mnt; +struct super_block *blockdev_superblock; /* * bdev cache handling - shamelessly stolen from inode.c @@ -250,6 +252,7 @@ err = PTR_ERR(bd_mnt); if (IS_ERR(bd_mnt)) panic("Cannot create bdev pseudo-fs"); + blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ } /* @@ -566,13 +569,6 @@ } } } - if (bdev->bd_inode->i_data.backing_dev_info == - &default_backing_dev_info) { - struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev); - if (bdi == NULL) - bdi = &default_backing_dev_info; - inode->i_data.backing_dev_info = bdi; - } if (bdev->bd_op->open) { ret = bdev->bd_op->open(inode, file); if (ret) @@ -593,6 +589,16 @@ bdev->bd_queue = p->queue(dev); else bdev->bd_queue = &p->request_queue; + if (bdev->bd_inode->i_data.backing_dev_info == + &default_backing_dev_info) { + struct backing_dev_info *bdi; + + bdi = blk_get_backing_dev_info(bdev); + if (bdi == NULL) + bdi = &default_backing_dev_info; + inode->i_data.backing_dev_info = bdi; + bdev->bd_inode->i_data.backing_dev_info = bdi; + } } bdev->bd_openers++; unlock_kernel(); diff -Nru a/fs/buffer.c b/fs/buffer.c --- a/fs/buffer.c Sat 
Jul 20 12:12:34 2002 +++ b/fs/buffer.c Sat Jul 20 12:12:34 2002 @@ -2078,7 +2078,7 @@ */ int cont_prepare_write(struct page *page, unsigned offset, - unsigned to, get_block_t *get_block, unsigned long *bytes) + unsigned to, get_block_t *get_block, loff_t *bytes) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; @@ -2308,55 +2308,6 @@ get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } - -#if 0 -int generic_direct_IO(int rw, struct inode *inode, - struct kiobuf *iobuf, unsigned long blocknr, - int blocksize, get_block_t *get_block) -{ - int i, nr_blocks, retval = 0; - sector_t *blocks = iobuf->blocks; - struct block_device *bdev = NULL; - - nr_blocks = iobuf->length / blocksize; - /* build the blocklist */ - for (i = 0; i < nr_blocks; i++, blocknr++) { - struct buffer_head bh; - - bh.b_state = 0; - bh.b_size = blocksize; - - retval = get_block(inode, blocknr, &bh, rw & 1); - if (retval) - goto out; - - if (rw == READ) { - if (buffer_new(&bh)) - BUG(); - if (!buffer_mapped(&bh)) { - /* there was an hole in the filesystem */ - blocks[i] = -1UL; - continue; - } - } else { - if (buffer_new(&bh)) - unmap_underlying_metadata(bh.b_bdev, - bh.b_blocknr); - if (!buffer_mapped(&bh)) - BUG(); - } - blocks[i] = bh.b_blocknr; - bdev = bh.b_bdev; - } - - /* This does not understand multi-device filesystems currently */ - if (bdev) - retval = brw_kiovec(rw, 1, &iobuf, bdev, blocks, blocksize); - - out: - return retval; -} -#endif /* * Start I/O on a physical range of kernel memory, defined by a vector diff -Nru a/fs/direct-io.c b/fs/direct-io.c --- a/fs/direct-io.c Sat Jul 20 12:12:34 2002 +++ b/fs/direct-io.c Sat Jul 20 12:12:34 2002 @@ -1,5 +1,5 @@ /* - * mm/direct-io.c + * fs/direct-io.c * * Copyright (C) 2002, Linus Torvalds. 
* @@ -61,7 +61,7 @@ atomic_t bio_count; spinlock_t bio_list_lock; struct bio *bio_list; /* singly linked via bi_private */ - wait_queue_head_t wait_q; + struct task_struct *waiter; }; /* @@ -81,6 +81,7 @@ int nr_pages; nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES); + down_read(¤t->mm->mmap_sem); ret = get_user_pages( current, /* Task for fault acounting */ current->mm, /* whose pages? */ @@ -90,6 +91,7 @@ 0, /* force (?) */ &dio->pages[0], NULL); /* vmas */ + up_read(¤t->mm->mmap_sem); if (ret >= 0) { dio->curr_user_address += ret * PAGE_SIZE; @@ -139,7 +141,7 @@ bio->bi_private = dio->bio_list; dio->bio_list = bio; spin_unlock_irqrestore(&dio->bio_list_lock, flags); - wake_up(&dio->wait_q); + wake_up_process(dio->waiter); } static int @@ -193,13 +195,11 @@ */ static struct bio *dio_await_one(struct dio *dio) { - DECLARE_WAITQUEUE(wait, current); unsigned long flags; struct bio *bio; spin_lock_irqsave(&dio->bio_list_lock, flags); while (dio->bio_list == NULL) { - add_wait_queue(&dio->wait_q, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (dio->bio_list == NULL) { spin_unlock_irqrestore(&dio->bio_list_lock, flags); @@ -208,7 +208,6 @@ spin_lock_irqsave(&dio->bio_list_lock, flags); } set_current_state(TASK_RUNNING); - remove_wait_queue(&dio->wait_q, &wait); } bio = dio->bio_list; dio->bio_list = bio->bi_private; @@ -224,23 +223,17 @@ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec; int page_no; - int ret = 0; for (page_no = 0; page_no < bio->bi_vcnt; page_no++) { struct page *page = bvec[page_no].bv_page; - if (!uptodate) { - if (ret == 0) - ret = -EIO; - } - if (dio->rw == READ) set_page_dirty(page); page_cache_release(page); } atomic_dec(&dio->bio_count); bio_put(bio); - return ret; + return uptodate ? 0 : -EIO; } /* @@ -265,7 +258,7 @@ * to keep the memory consumption sane we periodically reap any completed BIOs * during the BIO generation phase. 
* - * This also helps to limis the peak amount of pinned userspace memory. + * This also helps to limit the peak amount of pinned userspace memory. */ static int dio_bio_reap(struct dio *dio) { @@ -388,15 +381,13 @@ return ret; } -struct dio *g_dio; - int generic_direct_IO(int rw, struct inode *inode, char *buf, loff_t offset, size_t count, get_block_t get_block) { const unsigned blocksize_mask = (1 << inode->i_blkbits) - 1; const unsigned long user_addr = (unsigned long)buf; - int ret = 0; + int ret; int ret2; struct dio dio; size_t bytes; @@ -407,8 +398,6 @@ goto out; } - g_dio = &dio; - /* BIO submission state */ dio.bio = NULL; dio.bvec = NULL; @@ -444,11 +433,9 @@ atomic_set(&dio.bio_count, 0); spin_lock_init(&dio.bio_list_lock); dio.bio_list = NULL; - init_waitqueue_head(&dio.wait_q); + dio.waiter = current; - down_read(¤t->mm->mmap_sem); ret = do_direct_IO(&dio); - up_read(¤t->mm->mmap_sem); if (dio.bio) dio_bio_submit(&dio); diff -Nru a/fs/exec.c b/fs/exec.c --- a/fs/exec.c Sat Jul 20 12:12:35 2002 +++ b/fs/exec.c Sat Jul 20 12:12:35 2002 @@ -36,6 +36,7 @@ #include #include #include +#include #define __NO_VERSION__ #include #include @@ -283,6 +284,7 @@ flush_dcache_page(page); flush_page_to_ram(page); set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY)))); + page_add_rmap(page, pte); pte_unmap(pte); tsk->mm->rss++; spin_unlock(&tsk->mm->page_table_lock); @@ -621,6 +623,7 @@ { int mode; struct inode * inode = bprm->file->f_dentry->d_inode; + int retval; mode = inode->i_mode; /* @@ -650,27 +653,10 @@ bprm->e_gid = inode->i_gid; } - /* We don't have VFS support for capabilities yet */ - cap_clear(bprm->cap_inheritable); - cap_clear(bprm->cap_permitted); - cap_clear(bprm->cap_effective); - - /* To support inheritance of root-permissions and suid-root - * executables under compatibility mode, we raise all three - * capability sets for the file. 
- * - * If only the real uid is 0, we only raise the inheritable - * and permitted sets of the executable file. - */ - - if (!issecure(SECURE_NOROOT)) { - if (bprm->e_uid == 0 || current->uid == 0) { - cap_set_full(bprm->cap_inheritable); - cap_set_full(bprm->cap_permitted); - } - if (bprm->e_uid == 0) - cap_set_full(bprm->cap_effective); - } + /* fill in binprm security blob */ + retval = security_ops->bprm_set_security(bprm); + if (retval) + return retval; memset(bprm->buf,0,BINPRM_BUF_SIZE); return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE); @@ -693,16 +679,9 @@ void compute_creds(struct linux_binprm *bprm) { - kernel_cap_t new_permitted, working; int do_unlock = 0; - new_permitted = cap_intersect(bprm->cap_permitted, cap_bset); - working = cap_intersect(bprm->cap_inheritable, - current->cap_inheritable); - new_permitted = cap_combine(new_permitted, working); - - if (bprm->e_uid != current->uid || bprm->e_gid != current->gid || - !cap_issubset(new_permitted, current->cap_permitted)) { + if (bprm->e_uid != current->uid || bprm->e_gid != current->gid) { current->mm->dumpable = 0; lock_kernel(); @@ -714,32 +693,17 @@ bprm->e_uid = current->uid; bprm->e_gid = current->gid; } - if(!capable(CAP_SETPCAP)) { - new_permitted = cap_intersect(new_permitted, - current->cap_permitted); - } } do_unlock = 1; } - - /* For init, we want to retain the capabilities set - * in the init_task struct. 
Thus we skip the usual - * capability rules */ - if (current->pid != 1) { - current->cap_permitted = new_permitted; - current->cap_effective = - cap_intersect(new_permitted, bprm->cap_effective); - } - - /* AUD: Audit candidate if current->cap_effective is set */ - current->suid = current->euid = current->fsuid = bprm->e_uid; current->sgid = current->egid = current->fsgid = bprm->e_gid; if(do_unlock) unlock_kernel(); - current->keep_capabilities = 0; + + security_ops->bprm_compute_creds(bprm); } @@ -809,6 +773,10 @@ } } #endif + retval = security_ops->bprm_check_security(bprm); + if (retval) + return retval; + /* kernel module loader fixup */ /* so we don't try to load run modprobe in kernel space. */ set_fs(USER_DS); @@ -885,7 +853,7 @@ bprm.sh_bang = 0; bprm.loader = 0; bprm.exec = 0; - + bprm.security = NULL; bprm.mm = mm_alloc(); retval = -ENOMEM; if (!bprm.mm) @@ -903,6 +871,10 @@ if ((retval = bprm.envc) < 0) goto out_mm; + retval = security_ops->bprm_alloc_security(&bprm); + if (retval) + goto out; + retval = prepare_binprm(&bprm); if (retval < 0) goto out; @@ -921,9 +893,11 @@ goto out; retval = search_binary_handler(&bprm,regs); - if (retval >= 0) + if (retval >= 0) { /* execve success */ + security_ops->bprm_free_security(&bprm); return retval; + } out: /* Something went wrong, return the inode and free the argument pages*/ @@ -932,6 +906,9 @@ if (page) __free_page(page); } + + if (bprm.security) + security_ops->bprm_free_security(&bprm); out_mm: mmdrop(bprm.mm); diff -Nru a/fs/fat/file.c b/fs/fat/file.c --- a/fs/fat/file.c Sat Jul 20 12:12:35 2002 +++ b/fs/fat/file.c Sat Jul 20 12:12:35 2002 @@ -54,7 +54,7 @@ } if (!create) return 0; - if (iblock << sb->s_blocksize_bits != MSDOS_I(inode)->mmu_private) { + if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) { BUG(); return -EIO; } diff -Nru a/fs/fat/inode.c b/fs/fat/inode.c --- a/fs/fat/inode.c Sat Jul 20 12:12:35 2002 +++ b/fs/fat/inode.c Sat Jul 20 12:12:35 2002 @@ -417,7 +417,7 @@ } 
inode->i_blksize = 1 << sbi->cluster_bits; inode->i_blocks = ((inode->i_size + inode->i_blksize - 1) - & ~(inode->i_blksize - 1)) >> 9; + & ~((loff_t)inode->i_blksize - 1)) >> 9; MSDOS_I(inode)->i_logstart = 0; MSDOS_I(inode)->mmu_private = inode->i_size; @@ -775,6 +775,8 @@ sbi->fat_length = CF_LE_L(b->fat32_length); sbi->root_cluster = CF_LE_L(b->root_cluster); + sb->s_maxbytes = 0xffffffff; + /* MC - if info_sector is 0, don't multiply by 0 */ sbi->fsinfo_sector = CF_LE_W(b->info_sector); if (sbi->fsinfo_sector == 0) @@ -1063,7 +1065,7 @@ /* this is as close to the truth as we can get ... */ inode->i_blksize = 1 << sbi->cluster_bits; inode->i_blocks = ((inode->i_size + inode->i_blksize - 1) - & ~(inode->i_blksize - 1)) >> 9; + & ~((loff_t)inode->i_blksize - 1)) >> 9; inode->i_mtime = inode->i_atime = date_dos2unix(CF_LE_W(de->time),CF_LE_W(de->date)); inode->i_ctime = diff -Nru a/fs/fcntl.c b/fs/fcntl.c --- a/fs/fcntl.c Sat Jul 20 12:12:34 2002 +++ b/fs/fcntl.c Sat Jul 20 12:12:34 2002 @@ -245,27 +245,9 @@ } if (arg & O_DIRECT) { - if (inode->i_mapping && inode->i_mapping->a_ops) { - if (!inode->i_mapping->a_ops->direct_IO) + if (!inode->i_mapping || !inode->i_mapping->a_ops || + !inode->i_mapping->a_ops->direct_IO) return -EINVAL; - } - - /* - * alloc_kiovec() can sleep and we are only serialized by - * the big kernel lock here, so abuse the i_sem to serialize - * this case too. We of course wouldn't need to go deep down - * to the inode layer, we could stay at the file layer, but - * we don't want to pay for the memory of a semaphore in each - * file structure too and we use the inode semaphore that we just - * pay for anyways. 
- */ - error = 0; - down(&inode->i_sem); - if (!filp->f_iobuf) - error = alloc_kiovec(1, &filp->f_iobuf); - up(&inode->i_sem); - if (error < 0) - return error; } /* required for strict SunOS emulation */ diff -Nru a/fs/file_table.c b/fs/file_table.c --- a/fs/file_table.c Sat Jul 20 12:12:34 2002 +++ b/fs/file_table.c Sat Jul 20 12:12:34 2002 @@ -115,9 +115,6 @@ locks_remove_flock(file); - if (file->f_iobuf) - free_kiovec(1, &file->f_iobuf); - if (file->f_op && file->f_op->release) file->f_op->release(inode, file); fops_put(file->f_op); diff -Nru a/fs/fs-writeback.c b/fs/fs-writeback.c --- a/fs/fs-writeback.c Sat Jul 20 12:12:35 2002 +++ b/fs/fs-writeback.c Sat Jul 20 12:12:35 2002 @@ -19,9 +19,12 @@ #include #include #include +#include #include #include +extern struct super_block *blockdev_superblock; + /** * __mark_inode_dirty - internal function * @inode: inode to mark @@ -91,10 +94,8 @@ * If the inode was already on s_dirty, don't reposition * it (that would break s_dirty time-ordering). */ - if (!was_dirty) { - list_del(&inode->i_list); - list_add(&inode->i_list, &sb->s_dirty); - } + if (!was_dirty) + list_move(&inode->i_list, &sb->s_dirty); } out: spin_unlock(&inode_lock); @@ -133,8 +134,7 @@ struct address_space *mapping = inode->i_mapping; struct super_block *sb = inode->i_sb; - list_del(&inode->i_list); - list_add(&inode->i_list, &sb->s_locked_inodes); + list_move(&inode->i_list, &sb->s_locked_inodes); BUG_ON(inode->i_state & I_LOCK); @@ -212,9 +212,19 @@ * that it can be located for waiting on in __writeback_single_inode(). * * Called under inode_lock. + * + * If `bdi' is non-zero then we're being asked to writeback a specific queue. + * This function assumes that the blockdev superblock's inodes are backed by + * a variety of queues, so all inodes are searched. For other superblocks, + * assume that all inodes are backed by the same queue. + * + * FIXME: this linear search could get expensive with many filesystems. But + * how to fix? 
We need to go from an address_space to all inodes which share + * a queue with that address_space. */ -static void sync_sb_inodes(struct super_block *sb, int sync_mode, - int *nr_to_write, unsigned long *older_than_this) +static void +sync_sb_inodes(struct backing_dev_info *single_bdi, struct super_block *sb, + int sync_mode, int *nr_to_write, unsigned long *older_than_this) { struct list_head *tmp; struct list_head *head; @@ -228,7 +238,14 @@ struct backing_dev_info *bdi; int really_sync; - /* Was this inode dirtied after __sync_list was called? */ + if (single_bdi && mapping->backing_dev_info != single_bdi) { + if (sb != blockdev_superblock) + break; /* inappropriate superblock */ + list_move(&inode->i_list, &inode->i_sb->s_dirty); + continue; /* not this blockdev */ + } + + /* Was this inode dirtied after sync_sb_inodes was called? */ if (time_after(mapping->dirtied_when, start)) break; @@ -249,8 +266,7 @@ __writeback_single_inode(inode, really_sync, nr_to_write); if (sync_mode == WB_SYNC_HOLD) { mapping->dirtied_when = jiffies; - list_del(&inode->i_list); - list_add(&inode->i_list, &inode->i_sb->s_dirty); + list_move(&inode->i_list, &inode->i_sb->s_dirty); } if (current_is_pdflush()) writeback_release(bdi); @@ -269,23 +285,16 @@ } /* - * Start writeback of dirty pagecache data against all unlocked inodes. - * - * Note: - * We don't need to grab a reference to superblock here. If it has non-empty - * ->s_dirty it's hadn't been killed yet and kill_super() won't proceed - * past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes are - * empty. Since __sync_single_inode() regains inode_lock before it finally moves - * inode from superblock lists we are OK. - * - * If `older_than_this' is non-zero then only flush inodes which have a - * flushtime older than *older_than_this. - * - * This is a "memory cleansing" operation, not a "data integrity" operation. 
 + * If `bdi' is non-zero then we will scan the first inode against each + * superblock until we find the matching ones. One group will be the dirty + * inodes against a filesystem. Then when we hit the dummy blockdev superblock, + * sync_sb_inodes will seek out the blockdev which matches `bdi'. Maybe not + * super-efficient but we're about to do a ton of I/O... */ -void writeback_unlocked_inodes(int *nr_to_write, - enum writeback_sync_modes sync_mode, - unsigned long *older_than_this) +static void +__writeback_unlocked_inodes(struct backing_dev_info *bdi, int *nr_to_write, + enum writeback_sync_modes sync_mode, + unsigned long *older_than_this) { struct super_block *sb; @@ -295,7 +304,7 @@ for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) { if (!list_empty(&sb->s_dirty)) { spin_unlock(&sb_lock); - sync_sb_inodes(sb, sync_mode, nr_to_write, + sync_sb_inodes(bdi, sb, sync_mode, nr_to_write, older_than_this); spin_lock(&sb_lock); } @@ -306,6 +315,43 @@ spin_unlock(&inode_lock); } +/* + * Start writeback of dirty pagecache data against all unlocked inodes. + * + * Note: + * We don't need to grab a reference to superblock here. If it has non-empty + * ->s_dirty it hadn't been killed yet and kill_super() won't proceed + * past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes are + * empty. Since __sync_single_inode() regains inode_lock before it finally moves + * inode from superblock lists we are OK. + * + * If `older_than_this' is non-zero then only flush inodes which have a + * flushtime older than *older_than_this. + * + * This is a "memory cleansing" operation, not a "data integrity" operation. + */ +void writeback_unlocked_inodes(int *nr_to_write, + enum writeback_sync_modes sync_mode, + unsigned long *older_than_this) +{ + __writeback_unlocked_inodes(NULL, nr_to_write, + sync_mode, older_than_this); +} +/* + * Perform writeback of dirty data against a particular queue. + * + * This is for writer throttling. 
We don't want processes to write back + * other process's data, especially when the other data belongs to a + * different spindle. */ +void writeback_backing_dev(struct backing_dev_info *bdi, int *nr_to_write, + enum writeback_sync_modes sync_mode, + unsigned long *older_than_this) +{ + __writeback_unlocked_inodes(bdi, nr_to_write, + sync_mode, older_than_this); +} + static void __wait_on_locked(struct list_head *head) { struct list_head * tmp; @@ -336,7 +382,7 @@ nr_to_write = ps.nr_dirty + ps.nr_dirty / 4; spin_lock(&inode_lock); - sync_sb_inodes(sb, wait ? WB_SYNC_ALL : WB_SYNC_HOLD, + sync_sb_inodes(NULL, sb, wait ? WB_SYNC_ALL : WB_SYNC_HOLD, &nr_to_write, NULL); if (wait) __wait_on_locked(&sb->s_locked_inodes); diff -Nru a/fs/jfs/inode.c b/fs/jfs/inode.c --- a/fs/jfs/inode.c Sat Jul 20 12:12:34 2002 +++ b/fs/jfs/inode.c Sat Jul 20 12:12:34 2002 @@ -293,11 +293,10 @@ return generic_block_bmap(mapping, block, jfs_get_block); } -static int jfs_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf, - unsigned long blocknr, int blocksize) +static int jfs_direct_IO(int rw, struct inode *inode, char *buf, + loff_t offset, size_t count) { - return generic_direct_IO(rw, inode, iobuf, blocknr, - blocksize, jfs_get_block); + return generic_direct_IO(rw, inode, buf, offset, count, jfs_get_block); } struct address_space_operations jfs_aops = { diff -Nru a/fs/mpage.c b/fs/mpage.c --- a/fs/mpage.c Sat Jul 20 12:12:35 2002 +++ b/fs/mpage.c Sat Jul 20 12:12:35 2002 @@ -268,7 +268,7 @@ prefetchw(&page->flags); list_del(&page->list); - if (!add_to_page_cache_unique(page, mapping, page->index)) + if (!add_to_page_cache(page, mapping, page->index)) bio = do_mpage_readpage(bio, page, nr_pages - page_idx, &last_block_in_bio, get_block); diff -Nru a/fs/open.c b/fs/open.c --- a/fs/open.c Sat Jul 20 12:12:34 2002 +++ b/fs/open.c Sat Jul 20 12:12:34 2002 @@ -647,15 +647,6 @@ f->f_op = fops_get(inode->i_fop); file_move(f, &inode->i_sb->s_files); - /* preallocate kiobuf for 
O_DIRECT */ - f->f_iobuf = NULL; - f->f_iobuf_lock = 0; - if (f->f_flags & O_DIRECT) { - error = alloc_kiovec(1, &f->f_iobuf); - if (error) - goto cleanup_all; - } - if (f->f_op && f->f_op->open) { error = f->f_op->open(inode,f); if (error) @@ -665,17 +656,16 @@ /* NB: we're sure to have correct a_ops only after f_op->open */ if (f->f_flags & O_DIRECT) { - error = -EINVAL; - if (inode->i_mapping && inode->i_mapping->a_ops) - if (!inode->i_mapping->a_ops->direct_IO) + if (!inode->i_mapping || !inode->i_mapping->a_ops || + !inode->i_mapping->a_ops->direct_IO) { + error = -EINVAL; goto cleanup_all; + } } return f; cleanup_all: - if (f->f_iobuf) - free_kiovec(1, &f->f_iobuf); fops_put(f->f_op); if (f->f_mode & FMODE_WRITE) put_write_access(inode); diff -Nru a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c --- a/fs/proc/proc_misc.c Sat Jul 20 12:12:34 2002 +++ b/fs/proc/proc_misc.c Sat Jul 20 12:12:34 2002 @@ -159,7 +159,10 @@ "SwapTotal: %8lu kB\n" "SwapFree: %8lu kB\n" "Dirty: %8lu kB\n" - "Writeback: %8lu kB\n", + "Writeback: %8lu kB\n" + "PageTables: %8lu kB\n" + "PteChainTot: %8lu kB\n" + "PteChainUsed: %8lu kB\n", K(i.totalram), K(i.freeram), K(i.sharedram), @@ -174,7 +177,10 @@ K(i.totalswap), K(i.freeswap), K(ps.nr_dirty), - K(ps.nr_writeback) + K(ps.nr_writeback), + K(ps.nr_page_table_pages), + K(ps.nr_pte_chain_pages), + ps.used_pte_chains_bytes >> 10 ); return proc_calc_metrics(page, start, off, count, eof, len); @@ -347,9 +353,29 @@ } len += sprintf(page + len, - "\nctxt %lu\n" + "\npageallocs %u\n" + "pagefrees %u\n" + "pageactiv %u\n" + "pagedeact %u\n" + "pagefault %u\n" + "majorfault %u\n" + "pagescan %u\n" + "pagesteal %u\n" + "pageoutrun %u\n" + "allocstall %u\n" + "ctxt %lu\n" "btime %lu\n" "processes %lu\n", + kstat.pgalloc, + kstat.pgfree, + kstat.pgactivate, + kstat.pgdeactivate, + kstat.pgfault, + kstat.pgmajfault, + kstat.pgscan, + kstat.pgsteal, + kstat.pageoutrun, + kstat.allocstall, nr_context_switches(), xtime.tv_sec - jif / HZ, total_forks); diff 
-Nru a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h --- a/include/asm-alpha/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-alpha/ide.h Sat Jul 20 12:12:35 2002 @@ -19,8 +19,6 @@ #define MAX_HWIFS 4 #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { switch (base) { diff -Nru a/include/asm-alpha/rmap.h b/include/asm-alpha/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-alpha/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _ALPHA_RMAP_H +#define _ALPHA_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-arm/ide.h b/include/asm-arm/ide.h --- a/include/asm-arm/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-arm/ide.h Sat Jul 20 12:12:35 2002 @@ -17,8 +17,6 @@ #define MAX_HWIFS 4 #endif -#define ide__sti() __sti() - #include /* diff -Nru a/include/asm-arm/proc-armv/rmap.h b/include/asm-arm/proc-armv/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-arm/proc-armv/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,51 @@ +#ifndef _ARMV_RMAP_H +#define _ARMV_RMAP_H +/* + * linux/include/asm-arm/proc-armv/rmap.h + * + * Architecture dependant parts of the reverse mapping code, + * + * ARM is different since hardware page tables are smaller than + * the page size and Linux uses a "duplicate" one with extra info. + * For rmap this means that the first 2 kB of a page are the hardware + * page tables and the last 2 kB are the software page tables. 
+ */ + +static inline void pgtable_add_rmap(pte_t * ptep, struct mm_struct * mm, unsigned long address) +{ + struct page * page = virt_to_page(ptep); + + page->mm = mm; + page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); + inc_page_state(nr_page_table_pages); +} + +static inline void pgtable_remove_rmap(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + + page->mm = NULL; + page->index = 0; + dec_page_state(nr_page_table_pages); +} + +static inline struct mm_struct * ptep_to_mm(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + + return page->mm; +} + +/* The page table takes half of the page */ +#define PTE_MASK ((PAGE_SIZE / 2) - 1) + +static inline unsigned long ptep_to_address(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + unsigned long low_bits; + + low_bits = ((unsigned long)ptep & PTE_MASK) * PTRS_PER_PTE; + return page->index + low_bits; +} + +#endif /* _ARMV_RMAP_H */ diff -Nru a/include/asm-arm/rmap.h b/include/asm-arm/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-arm/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,6 @@ +#ifndef _ARM_RMAP_H +#define _ARM_RMAP_H + +#include + +#endif /* _ARM_RMAP_H */ diff -Nru a/include/asm-cris/ide.h b/include/asm-cris/ide.h --- a/include/asm-cris/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-cris/ide.h Sat Jul 20 12:12:35 2002 @@ -22,8 +22,6 @@ #define MAX_HWIFS 4 -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { /* all IDE busses share the same IRQ, number 4. 
diff -Nru a/include/asm-cris/rmap.h b/include/asm-cris/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-cris/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _CRIS_RMAP_H +#define _CRIS_RMAP_H + +/* nothing to see, move along :) */ +#include + +#endif diff -Nru a/include/asm-generic/rmap.h b/include/asm-generic/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-generic/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,54 @@ +#ifndef _GENERIC_RMAP_H +#define _GENERIC_RMAP_H +/* + * linux/include/asm-generic/rmap.h + * + * Architecture dependant parts of the reverse mapping code, + * this version should work for most architectures with a + * 'normal' page table layout. + * + * We use the struct page of the page table page to find out + * the process and full address of a page table entry: + * - page->mapping points to the process' mm_struct + * - page->index has the high bits of the address + * - the lower bits of the address are calculated from the + * offset of the page table entry within the page table page + */ +#include + +static inline void pgtable_add_rmap(struct page * page, struct mm_struct * mm, unsigned long address) +{ +#ifdef BROKEN_PPC_PTE_ALLOC_ONE + /* OK, so PPC calls pte_alloc() before mem_map[] is setup ... 
;( */ + extern int mem_init_done; + + if (!mem_init_done) + return; +#endif + page->mapping = (void *)mm; + page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); + inc_page_state(nr_page_table_pages); +} + +static inline void pgtable_remove_rmap(struct page * page) +{ + page->mapping = NULL; + page->index = 0; + dec_page_state(nr_page_table_pages); +} + +static inline struct mm_struct * ptep_to_mm(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + return (struct mm_struct *) page->mapping; +} + +static inline unsigned long ptep_to_address(pte_t * ptep) +{ + struct page * page = virt_to_page(ptep); + unsigned long low_bits; + low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE; + return page->index + low_bits; +} + +#endif /* _GENERIC_RMAP_H */ diff -Nru a/include/asm-i386/ide.h b/include/asm-i386/ide.h --- a/include/asm-i386/ide.h Sat Jul 20 12:12:34 2002 +++ b/include/asm-i386/ide.h Sat Jul 20 12:12:34 2002 @@ -23,8 +23,6 @@ # endif #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { switch (base) { diff -Nru a/include/asm-i386/rmap.h b/include/asm-i386/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-i386/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _I386_RMAP_H +#define _I386_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h --- a/include/asm-ia64/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-ia64/ide.h Sat Jul 20 12:12:35 2002 @@ -25,8 +25,6 @@ # endif #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq (ide_ioreg_t base) { diff -Nru a/include/asm-ia64/rmap.h b/include/asm-ia64/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-ia64/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _IA64_RMAP_H +#define _IA64_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-m68k/ide.h b/include/asm-m68k/ide.h --- 
a/include/asm-m68k/ide.h Sat Jul 20 12:12:34 2002 +++ b/include/asm-m68k/ide.h Sat Jul 20 12:12:34 2002 @@ -171,36 +171,5 @@ } } #endif /* CONFIG_ATARI */ - -/* - * On the Atari, we sometimes can't enable interrupts: - */ - -/* MSch: changed sti() to STI() wherever possible in ide.c; moved STI() def. - * to asm/ide.h - */ -/* The Atari interrupt structure strictly requires that the IPL isn't lowered - * uncontrolled in an interrupt handler. In the concrete case, the IDE - * interrupt is already a slow int, so the irq is already disabled at the time - * the handler is called, and the IPL has been lowered to the minimum value - * possible. To avoid going below that, STI() checks for being called inside - * an interrupt, and in that case it does nothing. Hope that is reasonable and - * works. (Roman) - */ -#ifdef MACH_ATARI_ONLY -#define ide__sti() \ - do { \ - if (!in_interrupt()) __sti(); \ - } while(0) -#elif defined(CONFIG_ATARI) -#define ide__sti() \ - do { \ - if (!MACH_IS_ATARI || !in_interrupt()) sti(); \ - } while(0) -#else /* !defined(CONFIG_ATARI) */ -#define ide__sti() __sti() -#endif - #endif /* __KERNEL__ */ - #endif /* _M68K_IDE_H */ diff -Nru a/include/asm-m68k/rmap.h b/include/asm-m68k/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-m68k/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _M68K_RMAP_H +#define _M68K_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-mips/ide.h b/include/asm-mips/ide.h --- a/include/asm-mips/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-mips/ide.h Sat Jul 20 12:12:35 2002 @@ -24,8 +24,6 @@ # endif #endif -#define ide__sti() __sti() - struct ide_ops { int (*ide_default_irq)(ide_ioreg_t base); ide_ioreg_t (*ide_default_io_base)(int index); diff -Nru a/include/asm-mips/rmap.h b/include/asm-mips/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-mips/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _MIPS_RMAP_H +#define _MIPS_RMAP_H + +/* 
nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-mips64/ide.h b/include/asm-mips64/ide.h --- a/include/asm-mips64/ide.h Sat Jul 20 12:12:34 2002 +++ b/include/asm-mips64/ide.h Sat Jul 20 12:12:34 2002 @@ -27,8 +27,6 @@ # endif #endif -#define ide__sti() __sti() - struct ide_ops { int (*ide_default_irq)(ide_ioreg_t base); ide_ioreg_t (*ide_default_io_base)(int index); diff -Nru a/include/asm-mips64/rmap.h b/include/asm-mips64/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-mips64/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _MIPS64_RMAP_H +#define _MIPS64_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h --- a/include/asm-parisc/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-parisc/ide.h Sat Jul 20 12:12:35 2002 @@ -19,8 +19,6 @@ #define MAX_HWIFS 10 #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { switch (base) { diff -Nru a/include/asm-parisc/rmap.h b/include/asm-parisc/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-parisc/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _PARISC_RMAP_H +#define _PARISC_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-ppc/ide.h b/include/asm-ppc/ide.h --- a/include/asm-ppc/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-ppc/ide.h Sat Jul 20 12:12:35 2002 @@ -44,8 +44,6 @@ #undef SUPPORT_SLOW_DATA_PORTS #define SUPPORT_SLOW_DATA_PORTS 0 -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { if (ppc_ide_md.default_irq) diff -Nru a/include/asm-ppc/rmap.h b/include/asm-ppc/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-ppc/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,9 @@ +#ifndef _PPC_RMAP_H +#define _PPC_RMAP_H + +/* PPC calls pte_alloc() before mem_map[] is setup ... 
*/ +#define BROKEN_PPC_PTE_ALLOC_ONE + +#include + +#endif diff -Nru a/include/asm-ppc64/ide.h b/include/asm-ppc64/ide.h --- a/include/asm-ppc64/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-ppc64/ide.h Sat Jul 20 12:12:35 2002 @@ -7,7 +7,7 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. - */ + */ /* * This file contains the ppc64 architecture specific IDE code. @@ -19,10 +19,8 @@ #ifdef __KERNEL__ #ifndef MAX_HWIFS -#define MAX_HWIFS 4 +# define MAX_HWIFS 4 #endif - -#define ide__sti() __sti() static __inline__ int ide_default_irq(ide_ioreg_t base) { return 0; } static __inline__ ide_ioreg_t ide_default_io_base(int index) { return 0; } diff -Nru a/include/asm-s390/ide.h b/include/asm-s390/ide.h --- a/include/asm-s390/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-s390/ide.h Sat Jul 20 12:12:35 2002 @@ -15,8 +15,6 @@ #define MAX_HWIFS 0 #endif -#define ide__sti() do {} while (0) - /* * We always use the new IDE port registering, * so these are fixed here. diff -Nru a/include/asm-s390/rmap.h b/include/asm-s390/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-s390/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _S390_RMAP_H +#define _S390_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-s390x/ide.h b/include/asm-s390x/ide.h --- a/include/asm-s390x/ide.h Sat Jul 20 12:12:34 2002 +++ /dev/null Wed Dec 31 16:00:00 1969 @@ -1,29 +0,0 @@ -/* - * linux/include/asm-s390/ide.h - * - * Copyright (C) 1994-1996 Linus Torvalds & authors - */ - -/* s390 does not have IDE */ - -#ifndef __ASMS390_IDE_H -#define __ASMS390_IDE_H - -#ifdef __KERNEL__ - -#ifndef MAX_HWIFS -#define MAX_HWIFS 0 -#endif - -#define ide__sti() do {} while (0) - -/* - * We always use the new IDE port registering, - * so these are fixed here. 
- */ -#define ide_default_io_base(i) ((ide_ioreg_t)0) -#define ide_default_irq(b) (0) - -#endif /* __KERNEL__ */ - -#endif /* __ASMS390_IDE_H */ diff -Nru a/include/asm-s390x/rmap.h b/include/asm-s390x/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-s390x/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _S390X_RMAP_H +#define _S390X_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-sh/ide.h b/include/asm-sh/ide.h --- a/include/asm-sh/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-sh/ide.h Sat Jul 20 12:12:35 2002 @@ -22,8 +22,6 @@ #define MAX_HWIFS 2 #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq_hp600(ide_ioreg_t base) { switch (base) { diff -Nru a/include/asm-sh/rmap.h b/include/asm-sh/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-sh/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _SH_RMAP_H +#define _SH_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h --- a/include/asm-sparc/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-sparc/ide.h Sat Jul 20 12:12:35 2002 @@ -20,8 +20,6 @@ #undef MAX_HWIFS #define MAX_HWIFS 2 -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { return 0; diff -Nru a/include/asm-sparc/rmap.h b/include/asm-sparc/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-sparc/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _SPARC_RMAP_H +#define _SPARC_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h --- a/include/asm-sparc64/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-sparc64/ide.h Sat Jul 20 12:12:35 2002 @@ -20,8 +20,6 @@ #undef MAX_HWIFS #define MAX_HWIFS 2 -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { return 0; diff -Nru a/include/asm-sparc64/rmap.h 
b/include/asm-sparc64/rmap.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/asm-sparc64/rmap.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +#ifndef _SPARC64_RMAP_H +#define _SPARC64_RMAP_H + +/* nothing to see, move along */ +#include + +#endif diff -Nru a/include/asm-x86_64/ide.h b/include/asm-x86_64/ide.h --- a/include/asm-x86_64/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/asm-x86_64/ide.h Sat Jul 20 12:12:35 2002 @@ -23,8 +23,6 @@ # endif #endif -#define ide__sti() __sti() - static __inline__ int ide_default_irq(ide_ioreg_t base) { switch (base) { diff -Nru a/include/linux/adfs_fs_i.h b/include/linux/adfs_fs_i.h --- a/include/linux/adfs_fs_i.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/adfs_fs_i.h Sat Jul 20 12:12:35 2002 @@ -11,7 +11,7 @@ * adfs file system inode data in memory */ struct adfs_inode_info { - unsigned long mmu_private; + loff_t mmu_private; unsigned long parent_id; /* object id of parent */ __u32 loadaddr; /* RISC OS load address */ __u32 execaddr; /* RISC OS exec address */ diff -Nru a/include/linux/affs_fs_i.h b/include/linux/affs_fs_i.h --- a/include/linux/affs_fs_i.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/affs_fs_i.h Sat Jul 20 12:12:35 2002 @@ -35,7 +35,7 @@ struct affs_ext_key *i_ac; /* associative cache of extended blocks */ u32 i_ext_last; /* last accessed extended block */ struct buffer_head *i_ext_bh; /* bh of last extended block */ - unsigned long mmu_private; + loff_t mmu_private; u32 i_protect; /* unused attribute bits */ u32 i_lastalloc; /* last allocated block */ int i_pa_cnt; /* number of preallocated blocks */ diff -Nru a/include/linux/agp_backend.h b/include/linux/agp_backend.h --- a/include/linux/agp_backend.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/agp_backend.h Sat Jul 20 12:12:35 2002 @@ -81,13 +81,13 @@ HP_ZX1, }; -typedef struct _agp_version { +struct agp_version { u16 major; u16 minor; -} agp_version; +}; typedef struct _agp_kern_info { - agp_version version; + struct agp_version version; struct pci_dev 
*device; enum chipset_type chipset; unsigned long mode; diff -Nru a/include/linux/agpgart.h b/include/linux/agpgart.h --- a/include/linux/agpgart.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/agpgart.h Sat Jul 20 12:12:34 2002 @@ -53,13 +53,13 @@ #include #include -typedef struct _agp_version { +struct agp_version { __u16 major; __u16 minor; -} agp_version; +}; typedef struct _agp_info { - agp_version version; /* version of the driver */ + struct agp_version version; /* version of the driver */ __u32 bridge_id; /* bridge vendor/device */ __u32 agp_mode; /* mode info of bridge */ off_t aper_base; /* base of aperture */ @@ -117,7 +117,7 @@ #define AGP_LOCK_INIT() sema_init(&(agp_fe.agp_mutex), 1) #ifndef _AGP_BACKEND_H -typedef struct _agp_version { +struct _agp_version { u16 major; u16 minor; } agp_version; @@ -125,7 +125,7 @@ #endif typedef struct _agp_info { - agp_version version; /* version of the driver */ + struct agp_version version; /* version of the driver */ u32 bridge_id; /* bridge vendor/device */ u32 agp_mode; /* mode info of bridge */ off_t aper_base; /* base of aperture */ diff -Nru a/include/linux/atapi.h b/include/linux/atapi.h --- a/include/linux/atapi.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/atapi.h Sat Jul 20 12:12:35 2002 @@ -74,15 +74,6 @@ } s; }; -extern void atapi_init_pc(struct atapi_packet_command *pc); - -extern void atapi_discard_data(struct ata_device *, unsigned int); -extern void atapi_write_zeros(struct ata_device *, unsigned int); - -extern void atapi_read(struct ata_device *, u8 *, unsigned int); -extern void atapi_write(struct ata_device *, u8 *, unsigned int); - - /* * ATAPI Status Register. 
*/ @@ -360,3 +351,20 @@ u8 sk_specific[2]; /* Sense Key Specific */ u8 pad[2]; /* Padding to 20 bytes */ } atapi_request_sense_result_t; + + +extern void atapi_init_pc(struct atapi_packet_command *pc); + +extern void atapi_discard_data(struct ata_device *, unsigned int); +extern void atapi_write_zeros(struct ata_device *, unsigned int); + +extern void atapi_read(struct ata_device *, u8 *, unsigned int); +extern void atapi_write(struct ata_device *, u8 *, unsigned int); + +typedef enum { + ide_wait, /* insert rq at end of list, and wait for it */ + ide_preempt, /* insert rq in front of current request */ + ide_end /* insert rq at end of list, but don't wait for it */ +} ide_action_t; + +extern int ide_do_drive_cmd(struct ata_device *, struct request *, ide_action_t); diff -Nru a/include/linux/binfmts.h b/include/linux/binfmts.h --- a/include/linux/binfmts.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/binfmts.h Sat Jul 20 12:12:35 2002 @@ -28,6 +28,7 @@ struct file * file; int e_uid, e_gid; kernel_cap_t cap_inheritable, cap_permitted, cap_effective; + void *security; int argc, envc; char * filename; /* Name of binary */ unsigned long loader, exec; diff -Nru a/include/linux/buffer_head.h b/include/linux/buffer_head.h --- a/include/linux/buffer_head.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/buffer_head.h Sat Jul 20 12:12:34 2002 @@ -178,7 +178,7 @@ int block_read_full_page(struct page*, get_block_t*); int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*, - unsigned long *); + loff_t *); int generic_cont_expand(struct inode *inode, loff_t size) ; int block_commit_write(struct page *page, unsigned from, unsigned to); int block_sync_page(struct page *); diff -Nru a/include/linux/fs.h b/include/linux/fs.h --- a/include/linux/fs.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/fs.h Sat Jul 20 12:12:35 2002 @@ -274,7 +274,6 @@ */ struct page; struct address_space; -struct kiobuf; 
struct address_space_operations { int (*writepage)(struct page *); @@ -493,10 +492,6 @@ /* needed for tty driver, and maybe others */ void *private_data; - - /* preallocated helper kiobuf to speedup O_DIRECT */ - struct kiobuf *f_iobuf; - long f_iobuf_lock; }; extern spinlock_t files_lock; #define file_list_lock() spin_lock(&files_lock); diff -Nru a/include/linux/hfs_fs_i.h b/include/linux/hfs_fs_i.h --- a/include/linux/hfs_fs_i.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/hfs_fs_i.h Sat Jul 20 12:12:35 2002 @@ -19,7 +19,7 @@ struct hfs_inode_info { int magic; /* A magic number */ - unsigned long mmu_private; + loff_t mmu_private; struct hfs_cat_entry *entry; /* For a regular or header file */ diff -Nru a/include/linux/hpfs_fs_i.h b/include/linux/hpfs_fs_i.h --- a/include/linux/hpfs_fs_i.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/hpfs_fs_i.h Sat Jul 20 12:12:35 2002 @@ -2,7 +2,7 @@ #define _HPFS_FS_I struct hpfs_inode_info { - unsigned long mmu_private; + loff_t mmu_private; ino_t i_parent_dir; /* (directories) gives fnode of parent dir */ unsigned i_dno; /* (directories) root dnode */ unsigned i_dpos; /* (directories) temp for readdir */ diff -Nru a/include/linux/ide.h b/include/linux/ide.h --- a/include/linux/ide.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/ide.h Sat Jul 20 12:12:35 2002 @@ -1,5 +1,6 @@ #ifndef _IDE_H #define _IDE_H + /* * Copyright (C) 1994-2002 Linus Torvalds & authors */ @@ -57,15 +58,14 @@ */ #define ERROR_MAX 8 /* Max read/write errors per sector */ #define ERROR_RESET 3 /* Reset controller every 4th retry */ -#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ /* - * state flags + * State flags. */ #define DMA_PIO_RETRY 1 /* retrying in PIO */ /* - * Definitions for accessing IDE controller registers + * Definitions for accessing IDE controller registers. 
*/ enum { @@ -192,23 +192,21 @@ * Structure to hold all information about the location of this port */ typedef struct hw_regs_s { - ide_ioreg_t io_ports[IDE_NR_PORTS]; /* task file registers */ - int irq; /* our irq number */ - int dma; /* our dma entry */ - ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ + ide_ioreg_t io_ports[IDE_NR_PORTS]; /* task file registers */ + int irq; /* our irq number */ + int dma; /* our dma entry */ + int (*ack_intr)(struct ata_channel *); /* acknowledge interrupt */ hwif_chipset_t chipset; } hw_regs_t; /* * Set up hw_regs_t structure before calling ide_register_hw (optional) */ -void ide_setup_ports(hw_regs_t *hw, - ide_ioreg_t base, - int *offsets, - ide_ioreg_t ctrl, - ide_ioreg_t intr, - ide_ack_intr_t *ack_intr, - int irq); +extern void ide_setup_ports(hw_regs_t *hw, + ide_ioreg_t base, int *offsets, + ide_ioreg_t ctrl, ide_ioreg_t intr, + int (*ack_intr)(struct ata_channel *), + int irq); #include @@ -282,14 +280,10 @@ unsigned int usage; /* current "open()" count for drive */ char type; /* distingiush different devices: disk, cdrom, tape, floppy, ... */ - /* NOTE: If we had proper separation between channel and host chip, we - * could move this to the channel and many sync problems would - * magically just go away. 
- */ - request_queue_t queue; /* per device request queue */ + request_queue_t queue; /* per device request queue */ struct request *rq; /* current request */ - unsigned long sleep; /* sleep until this time */ + unsigned long sleep; /* sleep until this time */ byte retry_pio; /* retrying dma capable host in pio */ byte state; /* retry state */ @@ -341,6 +335,7 @@ void *driver_data; /* extra driver data */ devfs_handle_t de; /* directory for device */ + char driver_req[10]; /* requests specific driver */ int last_lun; /* last logical unit */ @@ -392,6 +387,7 @@ enum { IDE_BUSY, /* awaiting an interrupt */ IDE_SLEEP, + IDE_PIO, /* PIO in progress */ IDE_DMA /* DMA in progress */ }; @@ -404,11 +400,15 @@ */ spinlock_t *lock; unsigned long *active; /* active processing request */ - ide_startstop_t (*handler)(struct ata_device *, struct request *); /* irq handler, if active */ + + /* FIXME: Only still used in PDC4030. Localize this code there by + * replacing with busy waits. + */ struct timer_list timer; /* failsafe timer */ ide_startstop_t (*expiry)(struct ata_device *, struct request *, unsigned long *); /* irq handler, if active */ unsigned long poll_timeout; /* timeout value during polled operations */ + struct ata_device *drive; /* last serviced drive */ @@ -508,8 +508,6 @@ extern int ide_register_hw(hw_regs_t *hw); extern void ide_unregister(struct ata_channel *); -struct ata_taskfile; - #define IDE_MAX_TAG 32 #ifdef CONFIG_BLK_DEV_IDE_TCQ @@ -605,8 +603,7 @@ #define DEVICE_NR(device) (minor(device) >> PARTN_BITS) #include -extern int __ata_end_request(struct ata_device *, struct request *, int, unsigned int); - +extern int ata_end_request(struct ata_device *, struct request *, int, unsigned int); extern void ata_set_handler(struct ata_device *drive, ata_handler_t handler, unsigned long timeout, ata_expiry_t expiry); @@ -627,21 +624,10 @@ struct ata_device *get_info_ptr(kdev_t i_rdev); /* - * "action" parameter type for ide_do_drive_cmd() below. 
- */ -typedef enum { - ide_wait, /* insert rq at end of list, and wait for it */ - ide_preempt, /* insert rq in front of current request */ - ide_end /* insert rq at end of list, but don't wait for it */ -} ide_action_t; - -/* * temporarily mapping a (possible) highmem bio for PIO transfer */ #define ide_rq_offset(rq) (((rq)->hard_cur_sectors - (rq)->current_nr_sectors) << 9) -extern int ide_do_drive_cmd(struct ata_device *, struct request *, ide_action_t); - struct ata_taskfile { struct hd_drive_task_hdr taskfile; struct hd_drive_task_hdr hobfile; @@ -654,7 +640,6 @@ extern void ata_write(struct ata_device *, void *, unsigned int); extern int ide_raw_taskfile(struct ata_device *, struct ata_taskfile *, char *); -extern void ide_fix_driveid(struct hd_driveid *id); extern int ide_config_drive_speed(struct ata_device *, byte); extern byte eighty_ninty_three(struct ata_device *); @@ -803,13 +788,12 @@ #define DRIVE_LOCK(drive) ((drive)->queue.queue_lock) -extern int drive_is_ready(struct ata_device *drive); - /* Low level device access functions. 
*/ extern void ata_select(struct ata_device *, unsigned long); extern void ata_mask(struct ata_device *); extern int ata_status(struct ata_device *, u8, u8); +extern int ata_status_irq(struct ata_device *drive); extern int ata_status_poll( struct ata_device *, u8, u8, unsigned long, struct request *rq); diff -Nru a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h --- a/include/linux/kernel_stat.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/kernel_stat.h Sat Jul 20 12:12:35 2002 @@ -26,6 +26,11 @@ unsigned int dk_drive_wblk[DK_MAX_MAJOR][DK_MAX_DISK]; unsigned int pgpgin, pgpgout; unsigned int pswpin, pswpout; + unsigned int pgalloc, pgfree; + unsigned int pgactivate, pgdeactivate; + unsigned int pgfault, pgmajfault; + unsigned int pgscan, pgsteal; + unsigned int pageoutrun, allocstall; #if !defined(CONFIG_ARCH_S390) unsigned int irqs[NR_CPUS][NR_IRQS]; #endif @@ -34,6 +39,13 @@ extern struct kernel_stat kstat; extern unsigned long nr_context_switches(void); + +/* + * Maybe we need to smp-ify kernel_stat some day. It would be nice to do + * that without having to modify all the code that increments the stats. + */ +#define KERNEL_STAT_INC(x) kstat.x++ +#define KERNEL_STAT_ADD(x, y) kstat.x += y #if !defined(CONFIG_ARCH_S390) /* diff -Nru a/include/linux/mm.h b/include/linux/mm.h --- a/include/linux/mm.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/mm.h Sat Jul 20 12:12:34 2002 @@ -130,6 +130,9 @@ struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused); }; +/* forward declaration; pte_chain is meant to be internal to rmap.c */ +struct pte_chain; + /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -154,6 +157,11 @@ updated asynchronously */ struct list_head lru; /* Pageout list, eg. active_list; protected by pagemap_lru_lock !! */ + union { + struct pte_chain * chain; /* Reverse pte mapping pointer. 
+ * protected by PG_chainlock */ + pte_t * direct; + } pte; unsigned long private; /* mapping-private opaque data */ /* @@ -453,17 +461,16 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int); /* mm/page-writeback.c */ -int generic_writepages(struct address_space *mapping, int *nr_to_write); int write_one_page(struct page *page, int wait); /* readahead.c */ #define VM_MAX_READAHEAD 128 /* kbytes */ #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ -void do_page_cache_readahead(struct file *file, +int do_page_cache_readahead(struct file *file, unsigned long offset, unsigned long nr_to_read); void page_cache_readahead(struct file *file, unsigned long offset); void page_cache_readaround(struct file *file, unsigned long offset); -void handle_ra_thrashing(struct file *file); +void handle_ra_miss(struct file *file); /* vma is the first one with address < vma->vm_end, * and even address < vma->vm_start. Have to extend vma. */ diff -Nru a/include/linux/mpage.h b/include/linux/mpage.h --- a/include/linux/mpage.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/mpage.h Sat Jul 20 12:12:35 2002 @@ -16,3 +16,8 @@ int mpage_writepages(struct address_space *mapping, int *nr_to_write, get_block_t get_block); +static inline int +generic_writepages(struct address_space *mapping, int *nr_to_write) +{ + return mpage_writepages(mapping, nr_to_write, NULL); +} diff -Nru a/include/linux/msdos_fs_i.h b/include/linux/msdos_fs_i.h --- a/include/linux/msdos_fs_i.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/msdos_fs_i.h Sat Jul 20 12:12:35 2002 @@ -8,7 +8,7 @@ */ struct msdos_inode_info { - unsigned long mmu_private; + loff_t mmu_private; int i_start; /* first cluster or 0 */ int i_logstart; /* logical first cluster */ int i_attrs; /* unused attribute bits */ diff -Nru a/include/linux/msg.h b/include/linux/msg.h --- a/include/linux/msg.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/msg.h Sat Jul 20 12:12:34 2002 @@ -63,6 +63,35 @@ #ifdef __KERNEL__ 
+/* one msg_msg structure for each message */ +struct msg_msg { + struct list_head m_list; + long m_type; + int m_ts; /* message text size */ + struct msg_msgseg* next; + /* the actual message follows immediately */ +}; + +#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg)) +#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg)) + +/* one msq_queue structure for each present queue on the system */ +struct msg_queue { + struct kern_ipc_perm q_perm; + time_t q_stime; /* last msgsnd time */ + time_t q_rtime; /* last msgrcv time */ + time_t q_ctime; /* last change time */ + unsigned long q_cbytes; /* current number of bytes on queue */ + unsigned long q_qnum; /* number of messages in queue */ + unsigned long q_qbytes; /* max number of bytes on queue */ + pid_t q_lspid; /* pid of last msgsnd */ + pid_t q_lrpid; /* last receive pid */ + + struct list_head q_messages; + struct list_head q_receivers; + struct list_head q_senders; +}; + asmlinkage long sys_msgget (key_t key, int msgflg); asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg); asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz, long msgtyp, int msgflg); diff -Nru a/include/linux/page-flags.h b/include/linux/page-flags.h --- a/include/linux/page-flags.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/page-flags.h Sat Jul 20 12:12:34 2002 @@ -47,7 +47,7 @@ * locked- and dirty-page accounting. The top eight bits of page->flags are * used for page->zone, so putting flag bits there doesn't work. */ -#define PG_locked 0 /* Page is locked. Don't touch. */ +#define PG_locked 0 /* Page is locked. Don't touch. 
*/ #define PG_error 1 #define PG_referenced 2 #define PG_uptodate 3 @@ -64,7 +64,10 @@ #define PG_private 12 /* Has something at ->private */ #define PG_writeback 13 /* Page is under writeback */ -#define PG_nosave 15 /* Used for system suspend/resume */ +#define PG_nosave 14 /* Used for system suspend/resume */ +#define PG_chainlock 15 /* lock bit for ->pte_chain */ + +#define PG_direct 16 /* ->pte_chain points directly at pte */ /* * Global page accounting. One instance per CPU. @@ -75,6 +78,9 @@ unsigned long nr_pagecache; unsigned long nr_active; /* on active_list LRU */ unsigned long nr_inactive; /* on inactive_list LRU */ + unsigned long nr_page_table_pages; + unsigned long nr_pte_chain_pages; + unsigned long used_pte_chains_bytes; } ____cacheline_aligned_in_smp page_states[NR_CPUS]; extern void get_page_state(struct page_state *ret); @@ -215,6 +221,37 @@ #define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags) #define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags) #define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags) + +#define PageDirect(page) test_bit(PG_direct, &(page)->flags) +#define SetPageDirect(page) set_bit(PG_direct, &(page)->flags) +#define TestSetPageDirect(page) test_and_set_bit(PG_direct, &(page)->flags) +#define ClearPageDirect(page) clear_bit(PG_direct, &(page)->flags) +#define TestClearPageDirect(page) test_and_clear_bit(PG_direct, &(page)->flags) + +/* + * inlines for acquisition and release of PG_chainlock + */ +static inline void pte_chain_lock(struct page *page) +{ + /* + * Assuming the lock is uncontended, this never enters + * the body of the outer loop. If it is contended, then + * within the inner loop a non-atomic test is used to + * busywait with less bus contention for a good time to + * attempt to acquire the lock bit. 
+ */ + preempt_disable(); + while (test_and_set_bit(PG_chainlock, &page->flags)) { + while (test_bit(PG_chainlock, &page->flags)) + cpu_relax(); + } +} + +static inline void pte_chain_unlock(struct page *page) +{ + clear_bit(PG_chainlock, &page->flags); + preempt_enable(); +} /* * The PageSwapCache predicate doesn't use a PG_flag at this time, diff -Nru a/include/linux/pagemap.h b/include/linux/pagemap.h --- a/include/linux/pagemap.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/pagemap.h Sat Jul 20 12:12:34 2002 @@ -52,8 +52,6 @@ extern int add_to_page_cache(struct page *page, struct address_space *mapping, unsigned long index); -extern int add_to_page_cache_unique(struct page *page, - struct address_space *mapping, unsigned long index); static inline void ___add_to_page_cache(struct page *page, struct address_space *mapping, unsigned long index) diff -Nru a/include/linux/pci_ids.h b/include/linux/pci_ids.h --- a/include/linux/pci_ids.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/pci_ids.h Sat Jul 20 12:12:35 2002 @@ -610,6 +610,7 @@ #define PCI_DEVICE_ID_PROMISE_20268 0x4d68 #define PCI_DEVICE_ID_PROMISE_20268R 0x6268 #define PCI_DEVICE_ID_PROMISE_20269 0x4d69 +#define PCI_DEVICE_ID_PROMISE_20271 0x6269 #define PCI_DEVICE_ID_PROMISE_20275 0x1275 #define PCI_DEVICE_ID_PROMISE_20276 0x5275 #define PCI_DEVICE_ID_PROMISE_5300 0x5300 diff -Nru a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h --- a/include/linux/qnx4_fs.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/qnx4_fs.h Sat Jul 20 12:12:35 2002 @@ -106,7 +106,7 @@ struct qnx4_inode_info { struct qnx4_inode_entry raw; - unsigned long mmu_private; + loff_t mmu_private; struct inode vfs_inode; }; diff -Nru a/include/linux/raid/md.h b/include/linux/raid/md.h --- a/include/linux/raid/md.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/raid/md.h Sat Jul 20 12:12:34 2002 @@ -75,7 +75,6 @@ extern void md_unregister_thread (mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread); extern void 
md_interrupt_thread (mdk_thread_t *thread); -extern void md_update_sb (mddev_t *mddev); extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors); extern int md_error (mddev_t *mddev, struct block_device *bdev); diff -Nru a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h --- a/include/linux/raid/md_k.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/raid/md_k.h Sat Jul 20 12:12:34 2002 @@ -25,13 +25,16 @@ #define MULTIPATH 7UL #define MAX_PERSONALITY 8UL +#define LEVEL_MULTIPATH (-4) +#define LEVEL_LINEAR (-1) + static inline int pers_to_level (int pers) { switch (pers) { - case MULTIPATH: return -4; + case MULTIPATH: return LEVEL_MULTIPATH; case HSM: return -3; case TRANSLUCENT: return -2; - case LINEAR: return -1; + case LINEAR: return LEVEL_LINEAR; case RAID0: return 0; case RAID1: return 1; case RAID5: return 5; @@ -43,10 +46,10 @@ static inline int level_to_pers (int level) { switch (level) { - case -4: return MULTIPATH; + case LEVEL_MULTIPATH: return MULTIPATH; case -3: return HSM; case -2: return TRANSLUCENT; - case -1: return LINEAR; + case LEVEL_LINEAR: return LINEAR; case 0: return RAID0; case 1: return RAID1; case 4: @@ -140,11 +143,7 @@ struct mdk_rdev_s { struct list_head same_set; /* RAID devices within the same set */ - struct list_head all; /* all RAID devices */ - struct list_head pending; /* undetected RAID devices */ - kdev_t dev; /* Device number */ - kdev_t old_dev; /* "" when it was last imported */ unsigned long size; /* Device size (in blocks) */ mddev_t *mddev; /* RAID array if running */ unsigned long last_events; /* IO event timestamp */ @@ -157,7 +156,10 @@ int alias_device; /* device alias to the same disk */ int faulty; /* if faulty do not issue IO requests */ + int in_sync; /* device is a full member of the array */ + int desc_nr; /* descriptor index in the superblock */ + int raid_disk; /* role of device in array */ }; typedef struct 
mdk_personality_s mdk_personality_t; @@ -167,11 +169,25 @@ void *private; mdk_personality_t *pers; int __minor; - mdp_super_t *sb; struct list_head disks; int sb_dirty; int ro; + /* Superblock information */ + int major_version, + minor_version, + patch_version; + int persistent; + int chunk_size; + time_t ctime, utime; + int level, layout; + int raid_disks; + unsigned long state; + sector_t size; /* used size of component devices */ + __u64 events; + + char uuid[16]; + struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ unsigned long curr_resync; /* blocks scheduled */ unsigned long resync_mark; /* a recent timestamp */ @@ -186,7 +202,11 @@ int in_sync; /* know to not need resync */ struct semaphore reconfig_sem; atomic_t active; - mdp_disk_t *spare; + mdk_rdev_t *spare; + + int degraded; /* whether md should consider + * adding a spare + */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; @@ -204,11 +224,11 @@ int (*stop)(mddev_t *mddev); int (*status)(char *page, mddev_t *mddev); int (*error_handler)(mddev_t *mddev, struct block_device *bdev); - int (*hot_add_disk) (mddev_t *mddev, mdp_disk_t *descriptor, mdk_rdev_t *rdev); + int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_remove_disk) (mddev_t *mddev, int number); - int (*spare_write) (mddev_t *mddev, int number); + int (*spare_write) (mddev_t *mddev); int (*spare_inactive) (mddev_t *mddev); - int (*spare_active) (mddev_t *mddev, mdp_disk_t **descriptor); + int (*spare_active) (mddev_t *mddev); int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster); }; @@ -228,38 +248,30 @@ return mk_kdev(MD_MAJOR, mdidx(mddev)); } -extern mdk_rdev_t * find_rdev(mddev_t * mddev, kdev_t dev); extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr); -extern mdp_disk_t *get_spare(mddev_t *mddev); +extern mdk_rdev_t *get_spare(mddev_t *mddev); /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. 
Dont touch 'tmp' though. */ -#define ITERATE_RDEV_GENERIC(head,field,rdev,tmp) \ +#define ITERATE_RDEV_GENERIC(head,rdev,tmp) \ \ for ((tmp) = (head).next; \ - (rdev) = (list_entry((tmp), mdk_rdev_t, field)), \ + (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ (tmp) = (tmp)->next, (tmp)->prev != &(head) \ ; ) /* * iterates through the 'same array disks' ringlist */ #define ITERATE_RDEV(mddev,rdev,tmp) \ - ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp) - - -/* - * Iterates through all 'RAID managed disks' - */ -#define ITERATE_RDEV_ALL(rdev,tmp) \ - ITERATE_RDEV_GENERIC(all_raid_disks,all,rdev,tmp) + ITERATE_RDEV_GENERIC((mddev)->disks,rdev,tmp) /* * Iterates through 'pending RAID disks' */ #define ITERATE_RDEV_PENDING(rdev,tmp) \ - ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp) + ITERATE_RDEV_GENERIC(pending_raid_disks,rdev,tmp) #define xchg_values(x,y) do { __typeof__(x) __tmp = x; \ x = y; y = __tmp; } while (0) diff -Nru a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h --- a/include/linux/raid/multipath.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/raid/multipath.h Sat Jul 20 12:12:35 2002 @@ -2,17 +2,15 @@ #define _MULTIPATH_H #include +#include struct multipath_info { - int number; - int raid_disk; struct block_device *bdev; /* * State bits: */ int operational; - int spare; int used_slot; }; @@ -20,23 +18,12 @@ struct multipath_private_data { mddev_t *mddev; struct multipath_info multipaths[MD_SB_DISKS]; - int nr_disks; int raid_disks; int working_disks; mdk_thread_t *thread; - struct multipath_info *spare; spinlock_t device_lock; - /* buffer pool */ - /* buffer_heads that we have pre-allocated have b_pprev -> &freebh - * and are linked into a stack using b_next - * multipath_bh that are pre-allocated have MPBH_PreAlloc set. 
- * All these variable are protected by device_lock - */ - struct multipath_bh *freer1; - int freer1_blocked; - int freer1_cnt; - wait_queue_head_t wait_buffer; + mempool_t *pool; }; typedef struct multipath_private_data multipath_conf_t; @@ -54,18 +41,10 @@ */ struct multipath_bh { - atomic_t remaining; /* 'have we finished' count, - * used from IRQ handlers - */ - int cmd; - unsigned long state; mddev_t *mddev; struct bio *master_bio; - struct bio *bio; - struct multipath_bh *next_mp; /* next for retry or in free list */ + struct bio bio; + int path; + struct multipath_bh *next_mp; /* next for retry */ }; -/* bits for multipath_bh.state */ -#define MPBH_Uptodate 1 -#define MPBH_SyncPhase 2 -#define MPBH_PreAlloc 3 /* this was pre-allocated, add to free list */ #endif diff -Nru a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h --- a/include/linux/raid/raid1.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/raid/raid1.h Sat Jul 20 12:12:35 2002 @@ -6,8 +6,6 @@ typedef struct mirror_info mirror_info_t; struct mirror_info { - int number; - int raid_disk; struct block_device *bdev; sector_t head_position; atomic_t nr_pending; @@ -27,7 +25,6 @@ struct r1_private_data_s { mddev_t *mddev; mirror_info_t mirrors[MD_SB_DISKS]; - int nr_disks; int raid_disks; int working_disks; int last_used; diff -Nru a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h --- a/include/linux/raid/raid5.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/raid/raid5.h Sat Jul 20 12:12:35 2002 @@ -194,8 +194,6 @@ struct disk_info { struct block_device *bdev; int operational; - int number; - int raid_disk; int write_only; int spare; int used_slot; diff -Nru a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/sched.h Sat Jul 20 12:12:34 2002 @@ -354,6 +354,8 @@ void *notifier_data; sigset_t *notifier_mask; + void *security; + /* Thread group tracking */ u32 parent_exec_id; u32 self_exec_id; @@ -587,10 +589,9 @@ unsigned 
long, const char *, void *); extern void free_irq(unsigned int, void *); -/* - * capable() checks for a particular capability. - * See include/linux/capability.h for defined capabilities. - */ +/* capable prototype and code moved to security.[hc] */ +#include +#if 0 static inline int capable(int cap) { if (cap_raised(current->cap_effective, cap)) { @@ -599,6 +600,7 @@ } return 0; } +#endif /* if 0 */ /* * Routines for handling mm_structs diff -Nru a/include/linux/security.h b/include/linux/security.h --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/include/linux/security.h Sat Jul 20 12:12:35 2002 @@ -0,0 +1,383 @@ +/* + * Linux Security plug + * + * Copyright (C) 2001 WireX Communications, Inc + * Copyright (C) 2001 Greg Kroah-Hartman + * Copyright (C) 2001 Networks Associates Technology, Inc + * Copyright (C) 2001 James Morris + * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Due to this file being licensed under the GPL there is controversy over + * whether this permits you to write a module that #includes this file + * without placing your module under the GPL. Please consult a lawyer for + * advice before doing this. 
+ * + */ + +#ifndef __LINUX_SECURITY_H +#define __LINUX_SECURITY_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Values used in the task_security_ops calls + */ +/* setuid or setgid, id0 == uid or gid */ +#define LSM_SETID_ID 1 + +/* setreuid or setregid, id0 == real, id1 == eff */ +#define LSM_SETID_RE 2 + +/* setresuid or setresgid, id0 == real, id1 == eff, uid2 == saved */ +#define LSM_SETID_RES 4 + +/* setfsuid or setfsgid, id0 == fsuid or fsgid */ +#define LSM_SETID_FS 8 + +/* forward declares to avoid warnings */ +struct sk_buff; +struct net_device; +struct nfsctl_arg; +struct sched_param; +struct swap_info_struct; + +/** + * struct security_operations - main security structure + * + * Security hooks for program execution operations. + * + * @bprm_alloc_security: + * Allocate and attach a security structure to the @bprm->security field. + * The security field is initialized to NULL when the bprm structure is + * allocated. + * @bprm contains the linux_binprm structure to be modified. + * Return 0 if operation was successful. + * @bprm_free_security: + * @bprm contains the linux_binprm structure to be modified. + * Deallocate and clear the @bprm->security field. + * @bprm_compute_creds: + * Compute and set the security attributes of a process being transformed + * by an execve operation based on the old attributes (current->security) + * and the information saved in @bprm->security by the set_security hook. + * Since this hook function (and its caller) are void, this hook can not + * return an error. However, it can leave the security attributes of the + * process unchanged if an access failure occurs at this point. It can + * also perform other state changes on the process (e.g. closing open + * file descriptors to which access is no longer granted if the attributes + * were changed). + * @bprm contains the linux_binprm structure. 
+ * @bprm_set_security: + * Save security information in the bprm->security field, typically based + * on information about the bprm->file, for later use by the compute_creds + * hook. This hook may also optionally check permissions (e.g. for + * transitions between security domains). + * This hook may be called multiple times during a single execve, e.g. for + * interpreters. The hook can tell whether it has already been called by + * checking to see if @bprm->security is non-NULL. If so, then the hook + * may decide either to retain the security information saved earlier or + * to replace it. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_check_security: + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check of the @bprm->security value which is set in + * the preceding set_security call. The primary difference from + * set_security is that the argv list and envp list are reliably + * available in @bprm. This hook may be called multiple times + * during a single execve; and in each pass set_security is called + * first. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * + * Security hooks for task operations. + * + * @task_create: + * Check permission before creating a child process. See the clone(2) + * manual page for definitions of the @clone_flags. + * @clone_flags contains the flags indicating what should be shared. + * Return 0 if permission is granted. + * @task_alloc_security: + * @p contains the task_struct for child process. + * Allocate and attach a security structure to the p->security field. The + * security field is initialized to NULL when the task structure is + * allocated. + * Return 0 if operation was successful. + * @task_free_security: + * @p contains the task_struct for process. + * Deallocate and clear the p->security field.
+ * @task_setuid: + * Check permission before setting one or more of the user identity + * attributes of the current process. The @flags parameter indicates + * which of the set*uid system calls invoked this hook and how to + * interpret the @id0, @id1, and @id2 parameters. See the LSM_SETID + * definitions at the beginning of this file for the @flags values and + * their meanings. + * @id0 contains a uid. + * @id1 contains a uid. + * @id2 contains a uid. + * @flags contains one of the LSM_SETID_* values. + * Return 0 if permission is granted. + * @task_post_setuid: + * Update the module's state after setting one or more of the user + * identity attributes of the current process. The @flags parameter + * indicates which of the set*uid system calls invoked this hook. If + * @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other + * parameters are not used. + * @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS). + * @old_euid contains the old effective uid (or -1 if LSM_SETID_FS). + * @old_suid contains the old saved uid (or -1 if LSM_SETID_FS). + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success. + * @task_setgid: + * Check permission before setting one or more of the group identity + * attributes of the current process. The @flags parameter indicates + * which of the set*gid system calls invoked this hook and how to + * interpret the @id0, @id1, and @id2 parameters. See the LSM_SETID + * definitions at the beginning of this file for the @flags values and + * their meanings. + * @id0 contains a gid. + * @id1 contains a gid. + * @id2 contains a gid. + * @flags contains one of the LSM_SETID_* values. + * Return 0 if permission is granted. + * @task_setpgid: + * Check permission before setting the process group identifier of the + * process @p to @pgid. + * @p contains the task_struct for process being modified. + * @pgid contains the new pgid. + * Return 0 if permission is granted. 
+ * @task_getpgid: + * Check permission before getting the process group identifier of the + * process @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_getsid: + * Check permission before getting the session identifier of the process + * @p. + * @p contains the task_struct for the process. + * Return 0 if permission is granted. + * @task_setgroups: + * Check permission before setting the supplementary group set of the + * current process to @grouplist. + * @gidsetsize contains the number of elements in @grouplist. + * @grouplist contains the array of gids. + * Return 0 if permission is granted. + * @task_setnice: + * Check permission before setting the nice value of @p to @nice. + * @p contains the task_struct of process. + * @nice contains the new nice value. + * Return 0 if permission is granted. + * @task_setrlimit: + * Check permission before setting the resource limits of the current + * process for @resource to @new_rlim. The old resource limit values can + * be examined by dereferencing (current->rlim + resource). + * @resource contains the resource whose limit is being set. + * @new_rlim contains the new limits for @resource. + * Return 0 if permission is granted. + * @task_setscheduler: + * Check permission before setting scheduling policy and/or parameters of + * process @p based on @policy and @lp. + * @p contains the task_struct for process. + * @policy contains the scheduling policy. + * @lp contains the scheduling parameters. + * Return 0 if permission is granted. + * @task_getscheduler: + * Check permission before obtaining scheduling information for process + * @p. + * @p contains the task_struct for process. + * Return 0 if permission is granted. + * @task_kill: + * Check permission before sending signal @sig to @p. @info can be NULL, + * the constant 1, or a pointer to a siginfo structure. 
If @info is 1 or + * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming + * from the kernel and should typically be permitted. + * SIGIO signals are handled separately by the send_sigiotask hook in + * file_security_ops. + * @p contains the task_struct for process. + * @info contains the signal information. + * @sig contains the signal value. + * Return 0 if permission is granted. + * @task_wait: + * Check permission before allowing a process to reap a child process @p + * and collect its status information. + * @p contains the task_struct for process. + * Return 0 if permission is granted. + * @task_prctl: + * Check permission before performing a process control operation on the + * current process. + * @option contains the operation. + * @arg2 contains an argument. + * @arg3 contains an argument. + * @arg4 contains an argument. + * @arg5 contains an argument. + * Return 0 if permission is granted. + * @task_kmod_set_label: + * Set the security attributes in current->security for the kernel module + * loader thread, so that it has the permissions needed to perform its + * function. + * @task_reparent_to_init: + * Set the security attributes in @p->security for a kernel thread that + * is being reparented to the init task. + * @p contains the task_struct for the kernel thread. + * + * @ptrace: + * Check permission before allowing the @parent process to trace the + * @child process. + * Security modules may also want to perform a process tracing check + * during an execve in the set_security or compute_creds hooks of + * binprm_security_ops if the process is being traced and its security + * attributes would be changed by the execve. + * @parent contains the task_struct structure for parent process. + * @child contains the task_struct structure for child process. + * Return 0 if permission is granted. + * @capget: + * Get the @effective, @inheritable, and @permitted capability sets for + * the @target process. 
The hook may also perform permission checking to + * determine if the current process is allowed to see the capability sets + * of the @target process. + * @target contains the task_struct structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 if the capability sets were successfully obtained. + * @capset_check: + * Check permission before setting the @effective, @inheritable, and + * @permitted capability sets for the @target process. + * Caveat: @target is also set to current if a set of processes is + * specified (i.e. all processes other than current and init or a + * particular process group). Hence, the capset_set hook may need to + * revalidate permission to the actual target process. + * @target contains the task_struct structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * Return 0 if permission is granted. + * @capset_set: + * Set the @effective, @inheritable, and @permitted capability sets for + * the @target process. Since capset_check cannot always check permission + * to the real @target process, this hook may also perform permission + * checking to determine if the current process is allowed to set the + * capability sets of the @target process. However, this hook has no way + * of returning an error due to the structure of the sys_capset code. + * @target contains the task_struct structure for target process. + * @effective contains the effective capability set. + * @inheritable contains the inheritable capability set. + * @permitted contains the permitted capability set. + * @capable: + * Check whether the @tsk process has the @cap capability. + * @tsk contains the task_struct for the process. + * @cap contains the capability. 
+ * Return 0 if the capability is granted for @tsk. + * @sys_security: + * Security modules may use this hook to implement new system calls for + * security-aware applications. The interface is similar to socketcall, + * but with an @id parameter to help identify the security module whose + * call is being invoked. The module is responsible for interpreting the + * parameters, and must copy in the @args array from user space if it is + * used. + * The recommended convention for creating the hexadecimal @id value is + * echo "Name_of_module" | md5sum | cut -c -8; by using this convention, + * there is no need for a central registry. + * @id contains the security module identifier. + * @call contains the call value. + * @args contains the call arguments (user space pointer). + * The module should return -ENOSYS if it does not implement any new + * system calls. + * + * @register_security: + * allow module stacking. + * @name contains the name of the security module being stacked. + * @ops contains a pointer to the struct security_operations of the module to stack. + * @unregister_security: + * remove a stacked module. + * @name contains the name of the security module being unstacked. + * @ops contains a pointer to the struct security_operations of the module to unstack. + * + * This is the main security structure. 
+ */ +struct security_operations { + int (*ptrace) (struct task_struct * parent, struct task_struct * child); + int (*capget) (struct task_struct * target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, kernel_cap_t * permitted); + int (*capset_check) (struct task_struct * target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted); + void (*capset_set) (struct task_struct * target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted); + int (*capable) (struct task_struct * tsk, int cap); + int (*sys_security) (unsigned int id, unsigned call, + unsigned long *args); + + int (*bprm_alloc_security) (struct linux_binprm * bprm); + void (*bprm_free_security) (struct linux_binprm * bprm); + void (*bprm_compute_creds) (struct linux_binprm * bprm); + int (*bprm_set_security) (struct linux_binprm * bprm); + int (*bprm_check_security) (struct linux_binprm * bprm); + + int (*task_create) (unsigned long clone_flags); + int (*task_alloc_security) (struct task_struct * p); + void (*task_free_security) (struct task_struct * p); + int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); + int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ , + uid_t old_euid, uid_t old_suid, int flags); + int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags); + int (*task_setpgid) (struct task_struct * p, pid_t pgid); + int (*task_getpgid) (struct task_struct * p); + int (*task_getsid) (struct task_struct * p); + int (*task_setgroups) (int gidsetsize, gid_t * grouplist); + int (*task_setnice) (struct task_struct * p, int nice); + int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim); + int (*task_setscheduler) (struct task_struct * p, int policy, + struct sched_param * lp); + int (*task_getscheduler) (struct task_struct * p); + int (*task_kill) (struct task_struct * p, + struct siginfo * info, int sig); + int (*task_wait) (struct task_struct * p); + int (*task_prctl) (int option, 
unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5); + void (*task_kmod_set_label) (void); + void (*task_reparent_to_init) (struct task_struct * p); + + /* allow module stacking */ + int (*register_security) (const char *name, + struct security_operations *ops); + int (*unregister_security) (const char *name, + struct security_operations *ops); +}; + + +/* prototypes */ +extern int security_scaffolding_startup (void); +extern int register_security (struct security_operations *ops); +extern int unregister_security (struct security_operations *ops); +extern int mod_reg_security (const char *name, struct security_operations *ops); +extern int mod_unreg_security (const char *name, struct security_operations *ops); +extern int capable (int cap); + +/* global variables */ +extern struct security_operations *security_ops; + + +#endif /* __KERNEL__ */ + +#endif /* ! __LINUX_SECURITY_H */ + diff -Nru a/include/linux/shm.h b/include/linux/shm.h --- a/include/linux/shm.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/shm.h Sat Jul 20 12:12:35 2002 @@ -71,6 +71,19 @@ }; #ifdef __KERNEL__ +struct shmid_kernel /* private to the kernel */ +{ + struct kern_ipc_perm shm_perm; + struct file * shm_file; + int id; + unsigned long shm_nattch; + unsigned long shm_segsz; + time_t shm_atim; + time_t shm_dtim; + time_t shm_ctim; + pid_t shm_cprid; + pid_t shm_lprid; +}; /* shm_mode upper byte flags */ #define SHM_DEST 01000 /* segment will be destroyed on last detach */ diff -Nru a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h --- a/include/linux/sunrpc/xprt.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/sunrpc/xprt.h Sat Jul 20 12:12:35 2002 @@ -37,7 +37,7 @@ #define RPC_MAXREQS RPC_MAXCONG #define RPC_CWNDSCALE (256) #define RPC_MAXCWND (RPC_MAXCONG * RPC_CWNDSCALE) -#define RPC_INITCWND (RPC_MAXCWND >> 1) +#define RPC_INITCWND RPC_CWNDSCALE #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) /* Default timeout values */ diff -Nru 
a/include/linux/swap.h b/include/linux/swap.h --- a/include/linux/swap.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/swap.h Sat Jul 20 12:12:34 2002 @@ -142,6 +142,19 @@ struct address_space; struct zone_t; +/* linux/mm/rmap.c */ +extern int FASTCALL(page_referenced(struct page *)); +extern void FASTCALL(page_add_rmap(struct page *, pte_t *)); +extern void FASTCALL(page_remove_rmap(struct page *, pte_t *)); +extern int FASTCALL(try_to_unmap(struct page *)); +extern int FASTCALL(page_over_rsslimit(struct page *)); + +/* return values of try_to_unmap */ +#define SWAP_SUCCESS 0 +#define SWAP_AGAIN 1 +#define SWAP_FAIL 2 +#define SWAP_ERROR 3 + /* linux/mm/swap.c */ extern void FASTCALL(lru_cache_add(struct page *)); extern void FASTCALL(__lru_cache_del(struct page *)); @@ -168,6 +181,7 @@ extern void show_swap_cache_info(void); #endif extern int add_to_swap_cache(struct page *, swp_entry_t); +extern int add_to_swap(struct page *); extern void __delete_from_swap_cache(struct page *page); extern void delete_from_swap_cache(struct page *page); extern int move_to_swap_cache(struct page *page, swp_entry_t entry); diff -Nru a/include/linux/sysctl.h b/include/linux/sysctl.h --- a/include/linux/sysctl.h Sat Jul 20 12:12:34 2002 +++ b/include/linux/sysctl.h Sat Jul 20 12:12:34 2002 @@ -72,7 +72,7 @@ /* CTL_BUS names: */ enum { - BUS_ISA=1 /* ISA */ + CTL_BUS_ISA=1 /* ISA */ }; /* CTL_KERN names: */ diff -Nru a/include/linux/usb.h b/include/linux/usb.h --- a/include/linux/usb.h Sat Jul 20 12:12:35 2002 +++ b/include/linux/usb.h Sat Jul 20 12:12:35 2002 @@ -431,6 +431,10 @@ /* for when layers above USB add new non-USB drivers */ extern void usb_scan_devices(void); +/* for probe/disconnect with correct module usage counting */ +void *usb_bind_driver(struct usb_driver *driver, struct usb_device *dev, unsigned int ifnum); +void usb_unbind_driver(struct usb_device *device, struct usb_interface *intf); + /* mostly for devices emulating SCSI over USB */ extern int 
usb_reset_device(struct usb_device *dev); diff -Nru a/init/main.c b/init/main.c --- a/init/main.c Sat Jul 20 12:12:35 2002 +++ b/init/main.c Sat Jul 20 12:12:35 2002 @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include @@ -389,6 +391,7 @@ fork_init(mempages); proc_caches_init(); + security_scaffolding_startup(); buffer_init(); vfs_caches_init(mempages); radix_tree_init(); @@ -501,6 +504,8 @@ */ free_initmem(); unlock_kernel(); + + kstat.pgfree = 0; if (open("/dev/console", O_RDWR, 0) < 0) printk("Warning: unable to open an initial console.\n"); diff -Nru a/ipc/msg.c b/ipc/msg.c --- a/ipc/msg.c Sat Jul 20 12:12:34 2002 +++ b/ipc/msg.c Sat Jul 20 12:12:34 2002 @@ -52,34 +52,6 @@ struct msg_msgseg* next; /* the next part of the message follows immediately */ }; -/* one msg_msg structure for each message */ -struct msg_msg { - struct list_head m_list; - long m_type; - int m_ts; /* message text size */ - struct msg_msgseg* next; - /* the actual message follows immediately */ -}; - -#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg)) -#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg)) - -/* one msq_queue structure for each present queue on the system */ -struct msg_queue { - struct kern_ipc_perm q_perm; - time_t q_stime; /* last msgsnd time */ - time_t q_rtime; /* last msgrcv time */ - time_t q_ctime; /* last change time */ - unsigned long q_cbytes; /* current number of bytes on queue */ - unsigned long q_qnum; /* number of messages in queue */ - unsigned long q_qbytes; /* max number of bytes on queue */ - pid_t q_lspid; /* pid of last msgsnd */ - pid_t q_lrpid; /* last receive pid */ - - struct list_head q_messages; - struct list_head q_receivers; - struct list_head q_senders; -}; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 @@ -122,13 +94,15 @@ msq = (struct msg_queue *) kmalloc (sizeof (*msq), GFP_KERNEL); if (!msq) return -ENOMEM; + + msq->q_perm.mode = (msgflg & S_IRWXUGO); + msq->q_perm.key = key; + id = ipc_addid(&msg_ids, 
&msq->q_perm, msg_ctlmni); if(id == -1) { kfree(msq); return -ENOSPC; } - msq->q_perm.mode = (msgflg & S_IRWXUGO); - msq->q_perm.key = key; msq->q_stime = msq->q_rtime = 0; msq->q_ctime = CURRENT_TIME; diff -Nru a/ipc/sem.c b/ipc/sem.c --- a/ipc/sem.c Sat Jul 20 12:12:35 2002 +++ b/ipc/sem.c Sat Jul 20 12:12:35 2002 @@ -129,15 +129,16 @@ return -ENOMEM; } memset (sma, 0, size); + + sma->sem_perm.mode = (semflg & S_IRWXUGO); + sma->sem_perm.key = key; + id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni); if(id == -1) { ipc_free(sma, size); return -ENOSPC; } used_sems += nsems; - - sma->sem_perm.mode = (semflg & S_IRWXUGO); - sma->sem_perm.key = key; sma->sem_base = (struct sem *) &sma[1]; /* sma->sem_pending = NULL; */ diff -Nru a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c Sat Jul 20 12:12:35 2002 +++ b/ipc/shm.c Sat Jul 20 12:12:35 2002 @@ -28,20 +28,6 @@ #include "util.h" -struct shmid_kernel /* private to the kernel */ -{ - struct kern_ipc_perm shm_perm; - struct file * shm_file; - int id; - unsigned long shm_nattch; - unsigned long shm_segsz; - time_t shm_atim; - time_t shm_dtim; - time_t shm_ctim; - pid_t shm_cprid; - pid_t shm_lprid; -}; - #define shm_flags shm_perm.mode static struct file_operations shm_file_operations; @@ -193,6 +179,10 @@ shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER); if (!shp) return -ENOMEM; + + shp->shm_perm.key = key; + shp->shm_flags = (shmflg & S_IRWXUGO); + sprintf (name, "SYSV%08x", key); file = shmem_file_setup(name, size); error = PTR_ERR(file); @@ -203,8 +193,7 @@ id = shm_addid(shp); if(id == -1) goto no_id; - shp->shm_perm.key = key; - shp->shm_flags = (shmflg & S_IRWXUGO); + shp->shm_cprid = current->pid; shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; diff -Nru a/kernel/capability.c b/kernel/capability.c --- a/kernel/capability.c Sat Jul 20 12:12:34 2002 +++ b/kernel/capability.c Sat Jul 20 12:12:34 2002 @@ -63,6 +63,7 @@ data.permitted = cap_t(target->cap_permitted); data.inheritable = 
cap_t(target->cap_inheritable); data.effective = cap_t(target->cap_effective); + ret = security_ops->capget(target, &data.effective, &data.inheritable, &data.permitted); out: read_unlock(&tasklist_lock); @@ -87,9 +88,7 @@ for_each_task(target) { if (target->pgrp != pgrp) continue; - target->cap_effective = *effective; - target->cap_inheritable = *inheritable; - target->cap_permitted = *permitted; + security_ops->capset_set(target, effective, inheritable, permitted); } } @@ -106,9 +105,7 @@ for_each_task(target) { if (target == current || target->pid == 1) continue; - target->cap_effective = *effective; - target->cap_inheritable = *inheritable; - target->cap_permitted = *permitted; + security_ops->capset_set(target, effective, inheritable, permitted); } } @@ -166,7 +163,9 @@ ret = -EPERM; - /* verify restrictions on target's new Inheritable set */ + if (security_ops->capset_check(target, &effective, &inheritable, &permitted)) + goto out; + if (!cap_issubset(inheritable, cap_combine(target->cap_inheritable, current->cap_permitted))) goto out; @@ -182,6 +181,8 @@ ret = 0; + /* having verified that the proposed changes are legal, + we now put them into effect. 
*/ if (pid < 0) { if (pid == -1) /* all procs other than current and init */ cap_set_all(&effective, &inheritable, &permitted); @@ -189,9 +190,7 @@ else /* all procs in process group */ cap_set_pg(-pid, &effective, &inheritable, &permitted); } else { - target->cap_effective = effective; - target->cap_inheritable = inheritable; - target->cap_permitted = permitted; + security_ops->capset_set(target, &effective, &inheritable, &permitted); } out: diff -Nru a/kernel/exit.c b/kernel/exit.c --- a/kernel/exit.c Sat Jul 20 12:12:35 2002 +++ b/kernel/exit.c Sat Jul 20 12:12:35 2002 @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +62,7 @@ wait_task_inactive(p); #endif atomic_dec(&p->user->processes); + security_ops->task_free_security(p); free_uid(p->user); unhash_process(p); @@ -187,10 +189,7 @@ /* cpus_allowed? */ /* rt_priority? */ /* signals? */ - current->cap_effective = CAP_INIT_EFF_SET; - current->cap_inheritable = CAP_INIT_INH_SET; - current->cap_permitted = CAP_FULL_SET; - current->keep_capabilities = 0; + security_ops->task_reparent_to_init(current); memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim))); current->user = INIT_USER; @@ -625,6 +624,10 @@ if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) && !(options & __WALL)) continue; + + if (security_ops->task_wait(p)) + continue; + flag = 1; switch (p->state) { case TASK_STOPPED: diff -Nru a/kernel/fork.c b/kernel/fork.c --- a/kernel/fork.c Sat Jul 20 12:12:34 2002 +++ b/kernel/fork.c Sat Jul 20 12:12:34 2002 @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -189,7 +189,6 @@ mm->map_count = 0; mm->rss = 0; mm->cpu_vm_mask = 0; - mm->swap_address = 0; pprev = &mm->mmap; /* @@ -308,9 +307,6 @@ void mmput(struct mm_struct *mm) { if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) { - extern struct mm_struct *swap_mm; - if (swap_mm == mm) - swap_mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist); 
list_del(&mm->mmlist); mmlist_nr--; spin_unlock(&mmlist_lock); @@ -622,6 +618,10 @@ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); + retval = security_ops->task_create(clone_flags); + if (retval) + goto fork_out; + retval = -ENOMEM; p = dup_task_struct(current); if (!p) @@ -701,13 +701,16 @@ p->array = NULL; p->lock_depth = -1; /* -1 = no lock */ p->start_time = jiffies; + p->security = NULL; INIT_LIST_HEAD(&p->local_pages); retval = -ENOMEM; + if (security_ops->task_alloc_security(p)) + goto bad_fork_cleanup; /* copy all the process information */ if (copy_semundo(clone_flags, p)) - goto bad_fork_cleanup; + goto bad_fork_cleanup_security; if (copy_files(clone_flags, p)) goto bad_fork_cleanup_semundo; if (copy_fs(clone_flags, p)) @@ -816,6 +819,8 @@ exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_semundo(p); +bad_fork_cleanup_security: + security_ops->task_free_security(p); bad_fork_cleanup: put_exec_domain(p->thread_info->exec_domain); if (p->binfmt && p->binfmt->module) diff -Nru a/kernel/kmod.c b/kernel/kmod.c --- a/kernel/kmod.c Sat Jul 20 12:12:35 2002 +++ b/kernel/kmod.c Sat Jul 20 12:12:35 2002 @@ -134,7 +134,7 @@ /* Give kmod all effective privileges.. */ curtask->euid = curtask->fsuid = 0; curtask->egid = curtask->fsgid = 0; - cap_set_full(curtask->cap_effective); + security_ops->task_kmod_set_label(); /* Allow execve args to be in kernel space. 
*/ set_fs(KERNEL_DS); diff -Nru a/kernel/ptrace.c b/kernel/ptrace.c --- a/kernel/ptrace.c Sat Jul 20 12:12:35 2002 +++ b/kernel/ptrace.c Sat Jul 20 12:12:35 2002 @@ -41,7 +41,9 @@ int ptrace_attach(struct task_struct *task) { + int retval; task_lock(task); + retval = -EPERM; if (task->pid <= 1) goto bad; if (task == current) @@ -53,7 +55,6 @@ (current->uid != task->uid) || (current->gid != task->egid) || (current->gid != task->sgid) || - (!cap_issubset(task->cap_permitted, current->cap_permitted)) || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE)) goto bad; rmb(); @@ -62,6 +63,9 @@ /* the same process cannot be attached many times */ if (task->ptrace & PT_PTRACED) goto bad; + retval = security_ops->ptrace(current, task); + if (retval) + goto bad; /* Go */ task->ptrace |= PT_PTRACED; @@ -82,7 +86,7 @@ bad: task_unlock(task); - return -EPERM; + return retval; } int ptrace_detach(struct task_struct *child, unsigned int data) diff -Nru a/kernel/sched.c b/kernel/sched.c --- a/kernel/sched.c Sat Jul 20 12:12:35 2002 +++ b/kernel/sched.c Sat Jul 20 12:12:35 2002 @@ -26,6 +26,7 @@ #include #include #include +#include /* * Convert user-nice values [ -20 ... 0 ... 
19 ] @@ -1123,6 +1124,7 @@ asmlinkage long sys_nice(int increment) { + int retval; long nice; /* @@ -1144,6 +1146,11 @@ nice = -20; if (nice > 19) nice = 19; + + retval = security_ops->task_setnice(current, nice); + if (retval) + return retval; + set_user_nice(current, nice); return 0; } @@ -1236,6 +1243,10 @@ !capable(CAP_SYS_NICE)) goto out_unlock; + retval = security_ops->task_setscheduler(p, policy, &lp); + if (retval) + goto out_unlock; + array = p->array; if (array) deactivate_task(p, task_rq(p)); @@ -1280,8 +1291,11 @@ retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); - if (p) - retval = p->policy; + if (p) { + retval = security_ops->task_getscheduler(p); + if (!retval) + retval = p->policy; + } read_unlock(&tasklist_lock); out_nounlock: @@ -1302,6 +1316,11 @@ retval = -ESRCH; if (!p) goto out_unlock; + + retval = security_ops->task_getscheduler(p); + if (retval) + goto out_unlock; + lp.sched_priority = p->rt_priority; read_unlock(&tasklist_lock); @@ -1509,13 +1528,21 @@ retval = -ESRCH; read_lock(&tasklist_lock); p = find_process_by_pid(pid); - if (p) - jiffies_to_timespec(p->policy & SCHED_FIFO ? - 0 : TASK_TIMESLICE(p), &t); + if (!p) + goto out_unlock; + + retval = security_ops->task_getscheduler(p); + if (retval) + goto out_unlock; + + jiffies_to_timespec(p->policy & SCHED_FIFO ? + 0 : TASK_TIMESLICE(p), &t); read_unlock(&tasklist_lock); - if (p) - retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; + retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; out_nounlock: + return retval; +out_unlock: + read_unlock(&tasklist_lock); return retval; } diff -Nru a/kernel/signal.c b/kernel/signal.c --- a/kernel/signal.c Sat Jul 20 12:12:35 2002 +++ b/kernel/signal.c Sat Jul 20 12:12:35 2002 @@ -548,6 +548,9 @@ ret = -EPERM; if (bad_signal(sig, info, t)) goto out_nolock; + ret = security_ops->task_kill(t, info, sig); + if (ret) + goto out_nolock; /* The null signal is a permissions and process existence probe. 
No signal is actually delivered. Same goes for zombies. */ diff -Nru a/kernel/sys.c b/kernel/sys.c --- a/kernel/sys.c Sat Jul 20 12:12:34 2002 +++ b/kernel/sys.c Sat Jul 20 12:12:34 2002 @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -234,6 +235,7 @@ read_lock(&tasklist_lock); for_each_task(p) { + int no_nice; if (!proc_sel(p, which, who)) continue; if (p->uid != current->euid && @@ -243,10 +245,17 @@ } if (error == -ESRCH) error = 0; - if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) + if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) { error = -EACCES; - else - set_user_nice(p, niceval); + continue; + } + no_nice = security_ops->task_setnice(p, niceval); + if (no_nice) { + error = no_nice; + continue; + } + set_user_nice(p, niceval); + } read_unlock(&tasklist_lock); @@ -416,6 +425,11 @@ int old_egid = current->egid; int new_rgid = old_rgid; int new_egid = old_egid; + int retval; + + retval = security_ops->task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); + if (retval) + return retval; if (rgid != (gid_t) -1) { if ((old_rgid == rgid) || @@ -457,6 +471,11 @@ asmlinkage long sys_setgid(gid_t gid) { int old_egid = current->egid; + int retval; + + retval = security_ops->task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); + if (retval) + return retval; if (capable(CAP_SETGID)) { @@ -481,52 +500,6 @@ return 0; } -/* - * cap_emulate_setxuid() fixes the effective / permitted capabilities of - * a process after a call to setuid, setreuid, or setresuid. - * - * 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of - * {r,e,s}uid != 0, the permitted and effective capabilities are - * cleared. - * - * 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective - * capabilities of the process are cleared. - * - * 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective - * capabilities are set to the permitted capabilities. - * - * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should - * never happen. 
- * - * -astor - * - * cevans - New behaviour, Oct '99 - * A process may, via prctl(), elect to keep its capabilities when it - * calls setuid() and switches away from uid==0. Both permitted and - * effective sets will be retained. - * Without this change, it was impossible for a daemon to drop only some - * of its privilege. The call to setuid(!=0) would drop all privileges! - * Keeping uid 0 is not an option because uid 0 owns too many vital - * files.. - * Thanks to Olaf Kirch and Peter Benie for spotting this. - */ -static inline void cap_emulate_setxuid(int old_ruid, int old_euid, - int old_suid) -{ - if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) && - (current->uid != 0 && current->euid != 0 && current->suid != 0) && - !current->keep_capabilities) { - cap_clear(current->cap_permitted); - cap_clear(current->cap_effective); - } - if (old_euid == 0 && current->euid != 0) { - cap_clear(current->cap_effective); - } - if (old_euid != 0 && current->euid == 0) { - current->cap_effective = current->cap_permitted; - } -} - static int set_user(uid_t new_ruid, int dumpclear) { struct user_struct *new_user, *old_user; @@ -572,6 +545,11 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) { int old_ruid, old_euid, old_suid, new_ruid, new_euid; + int retval; + + retval = security_ops->task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); + if (retval) + return retval; new_ruid = old_ruid = current->uid; new_euid = old_euid = current->euid; @@ -608,11 +586,7 @@ current->suid = current->euid; current->fsuid = current->euid; - if (!issecure(SECURE_NO_SETUID_FIXUP)) { - cap_emulate_setxuid(old_ruid, old_euid, old_suid); - } - - return 0; + return security_ops->task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); } @@ -632,6 +606,11 @@ { int old_euid = current->euid; int old_ruid, old_suid, new_ruid, new_suid; + int retval; + + retval = security_ops->task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); + if (retval) + return retval; old_ruid = new_ruid = 
current->uid; old_suid = current->suid; @@ -652,11 +631,7 @@ current->fsuid = current->euid = uid; current->suid = new_suid; - if (!issecure(SECURE_NO_SETUID_FIXUP)) { - cap_emulate_setxuid(old_ruid, old_euid, old_suid); - } - - return 0; + return security_ops->task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); } @@ -669,6 +644,11 @@ int old_ruid = current->uid; int old_euid = current->euid; int old_suid = current->suid; + int retval; + + retval = security_ops->task_setuid(ruid, euid, suid, LSM_SETID_RES); + if (retval) + return retval; if (!capable(CAP_SETUID)) { if ((ruid != (uid_t) -1) && (ruid != current->uid) && @@ -697,11 +677,7 @@ if (suid != (uid_t) -1) current->suid = suid; - if (!issecure(SECURE_NO_SETUID_FIXUP)) { - cap_emulate_setxuid(old_ruid, old_euid, old_suid); - } - - return 0; + return security_ops->task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); } asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid) @@ -720,6 +696,12 @@ */ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) { + int retval; + + retval = security_ops->task_setgid(rgid, egid, sgid, LSM_SETID_RES); + if (retval) + return retval; + if (!capable(CAP_SETGID)) { if ((rgid != (gid_t) -1) && (rgid != current->gid) && (rgid != current->egid) && (rgid != current->sgid)) @@ -768,6 +750,11 @@ asmlinkage long sys_setfsuid(uid_t uid) { int old_fsuid; + int retval; + + retval = security_ops->task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); + if (retval) + return retval; old_fsuid = current->fsuid; if (uid == current->uid || uid == current->euid || @@ -782,24 +769,9 @@ current->fsuid = uid; } - /* We emulate fsuid by essentially doing a scaled-down version - * of what we did in setresuid and friends. However, we only - * operate on the fs-specific bits of the process' effective - * capabilities - * - * FIXME - is fsuser used for all CAP_FS_MASK capabilities? - * if not, we might be a bit too harsh here. 
- */ - - if (!issecure(SECURE_NO_SETUID_FIXUP)) { - if (old_fsuid == 0 && current->fsuid != 0) { - cap_t(current->cap_effective) &= ~CAP_FS_MASK; - } - if (old_fsuid != 0 && current->fsuid == 0) { - cap_t(current->cap_effective) |= - (cap_t(current->cap_permitted) & CAP_FS_MASK); - } - } + retval = security_ops->task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); + if (retval) + return retval; return old_fsuid; } @@ -810,6 +782,11 @@ asmlinkage long sys_setfsgid(gid_t gid) { int old_fsgid; + int retval; + + retval = security_ops->task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS); + if (retval) + return retval; old_fsgid = current->fsgid; if (gid == current->gid || gid == current->egid || @@ -904,6 +881,10 @@ } ok_pgid: + err = security_ops->task_setpgid(p, pgid); + if (err) + goto out; + p->pgrp = pgid; err = 0; out: @@ -924,8 +905,11 @@ p = find_task_by_pid(pid); retval = -ESRCH; - if (p) - retval = p->pgrp; + if (p) { + retval = security_ops->task_getpgid(p); + if (!retval) + retval = p->pgrp; + } read_unlock(&tasklist_lock); return retval; } @@ -949,8 +933,11 @@ p = find_task_by_pid(pid); retval = -ESRCH; - if(p) - retval = p->session; + if(p) { + retval = security_ops->task_getsid(p); + if (!retval) + retval = p->session; + } read_unlock(&tasklist_lock); return retval; } @@ -1008,12 +995,19 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t *grouplist) { + gid_t groups[NGROUPS]; + int retval; + if (!capable(CAP_SETGID)) return -EPERM; if ((unsigned) gidsetsize > NGROUPS) return -EINVAL; - if(copy_from_user(current->groups, grouplist, gidsetsize * sizeof(gid_t))) + if(copy_from_user(groups, grouplist, gidsetsize * sizeof(gid_t))) return -EFAULT; + retval = security_ops->task_setgroups(gidsetsize, groups); + if (retval) + return retval; + memcpy(current->groups, groups, gidsetsize * sizeof(gid_t)); current->ngroups = gidsetsize; return 0; } @@ -1158,6 +1152,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit *rlim) { struct 
rlimit new_rlim, *old_rlim; + int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; @@ -1172,6 +1167,11 @@ if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN) return -EPERM; } + + retval = security_ops->task_setrlimit(resource, &new_rlim); + if (retval) + return retval; + *old_rlim = new_rlim; return 0; } @@ -1242,6 +1242,10 @@ { int error = 0; int sig; + + error = security_ops->task_prctl(option, arg2, arg3, arg4, arg5); + if (error) + return error; switch (option) { case PR_SET_PDEATHSIG: diff -Nru a/kernel/uid16.c b/kernel/uid16.c --- a/kernel/uid16.c Sat Jul 20 12:12:34 2002 +++ b/kernel/uid16.c Sat Jul 20 12:12:34 2002 @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -128,6 +129,7 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t *grouplist) { old_gid_t groups[NGROUPS]; + gid_t new_groups[NGROUPS]; int i; if (!capable(CAP_SETGID)) @@ -137,7 +139,11 @@ if (copy_from_user(groups, grouplist, gidsetsize * sizeof(old_gid_t))) return -EFAULT; for (i = 0 ; i < gidsetsize ; i++) - current->groups[i] = (gid_t)groups[i]; + new_groups[i] = (gid_t)groups[i]; + i = security_ops->task_setgroups(gidsetsize, new_groups); + if (i) + return i; + memcpy(current->groups, new_groups, gidsetsize * sizeof(gid_t)); current->ngroups = gidsetsize; return 0; } diff -Nru a/mm/Makefile b/mm/Makefile --- a/mm/Makefile Sat Jul 20 12:12:35 2002 +++ b/mm/Makefile Sat Jul 20 12:12:35 2002 @@ -16,6 +16,6 @@ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \ shmem.o highmem.o mempool.o msync.o mincore.o readahead.o \ - pdflush.o page-writeback.o + pdflush.o page-writeback.o rmap.o include $(TOPDIR)/Rules.make diff -Nru a/mm/filemap.c b/mm/filemap.c --- a/mm/filemap.c Sat Jul 20 12:12:35 2002 +++ b/mm/filemap.c Sat Jul 20 12:12:35 2002 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -176,14 +177,13 @@ */ static void truncate_complete_page(struct page 
*page) { - /* Leave it on the LRU if it gets converted into anonymous buffers */ - if (!PagePrivate(page) || do_invalidatepage(page, 0)) { - lru_cache_del(page); - } else { + /* Drop fs-specific data so the page might become freeable. */ + if (PagePrivate(page) && !do_invalidatepage(page, 0)) { if (current->flags & PF_INVALIDATE) printk("%s: buffer heads were leaked\n", current->comm); } + ClearPageDirty(page); ClearPageUptodate(page); remove_inode_page(page); @@ -520,8 +520,6 @@ * This adds a page to the page cache, starting out as locked, unreferenced, * not uptodate and with no errors. * - * The caller must hold a write_lock on mapping->page_lock. - * * This function is used for two things: adding newly allocated pagecache * pages and for moving existing anon pages into swapcache. * @@ -533,44 +531,20 @@ * SetPageLocked() is ugly-but-OK there too. The required page state has been * set up by swap_out_add_to_swap_cache(). */ -static int __add_to_page_cache(struct page *page, +int add_to_page_cache(struct page *page, struct address_space *mapping, unsigned long offset) { - if (radix_tree_insert(&mapping->page_tree, offset, page) == 0) { + int error; + + write_lock(&mapping->page_lock); + error = radix_tree_insert(&mapping->page_tree, offset, page); + if (!error) { SetPageLocked(page); ClearPageDirty(page); ___add_to_page_cache(page, mapping, offset); page_cache_get(page); - return 0; } - return -ENOMEM; -} - -int add_to_page_cache(struct page *page, - struct address_space *mapping, unsigned long offset) -{ - write_lock(&mapping->page_lock); - if (__add_to_page_cache(page, mapping, offset) < 0) - goto nomem; - write_unlock(&mapping->page_lock); - lru_cache_add(page); - return 0; -nomem: - write_unlock(&mapping->page_lock); - return -ENOMEM; -} - -int add_to_page_cache_unique(struct page *page, - struct address_space *mapping, unsigned long offset) -{ - struct page *alias; - int error = -EEXIST; - - write_lock(&mapping->page_lock); - if (!(alias = 
radix_tree_lookup(&mapping->page_tree, offset))) - error = __add_to_page_cache(page, mapping, offset); write_unlock(&mapping->page_lock); - if (!error) lru_cache_add(page); return error; @@ -587,17 +561,11 @@ struct page *page; int error; - read_lock(&mapping->page_lock); - page = radix_tree_lookup(&mapping->page_tree, offset); - read_unlock(&mapping->page_lock); - if (page) - return 0; - page = page_cache_alloc(mapping); if (!page) return -ENOMEM; - error = add_to_page_cache_unique(page, mapping, offset); + error = add_to_page_cache(page, mapping, offset); if (!error) { error = mapping->a_ops->readpage(file, page); page_cache_release(page); @@ -660,7 +628,7 @@ * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * * The first mb is necessary to safely close the critical section opened by the - * TryLockPage(), the second mb is necessary to enforce ordering between + * TestSetPageLocked(), the second mb is necessary to enforce ordering between * the clear_bit and the read of the waitqueue (to avoid SMP races with a * parallel wait_on_page_locked()). */ @@ -759,28 +727,31 @@ return page; } -/* - * Must be called with the mapping lock held for writing. - * Will return with it held for writing, but it may be dropped - * while locking the page. +/** + * find_lock_page - locate, pin and lock a pagecache page + * + * @mapping - the address_space to search + * @offset - the page index + * + * Locates the desired pagecache page, locks it, increments its reference + * count and returns its address. + * + * Returns zero if the page was not present. find_lock_page() may sleep. */ -static struct page *__find_lock_page(struct address_space *mapping, - unsigned long offset) +struct page *find_lock_page(struct address_space *mapping, + unsigned long offset) { struct page *page; - /* - * We scan the hash list read-only. Addition to and removal from - * the hash-list needs a held write-lock. 
- */ + read_lock(&mapping->page_lock); repeat: page = radix_tree_lookup(&mapping->page_tree, offset); if (page) { page_cache_get(page); if (TestSetPageLocked(page)) { - write_unlock(&mapping->page_lock); + read_unlock(&mapping->page_lock); lock_page(page); - write_lock(&mapping->page_lock); + read_lock(&mapping->page_lock); /* Has the page been truncated while we slept? */ if (page->mapping != mapping || page->index != offset) { @@ -790,34 +761,7 @@ } } } - return page; -} - -/** - * find_lock_page - locate, pin and lock a pagecache page - * - * @mapping - the address_space to search - * @offset - the page index - * - * Locates the desired pagecache page, locks it, increments its reference - * count and returns its address. - * - * Returns zero if the page was not present. find_lock_page() may sleep. - */ - -/* - * The write_lock is unfortunate, but __find_lock_page() requires that on - * behalf of find_or_create_page(). We could just clone __find_lock_page() - - * one for find_lock_page(), one for find_or_create_page()... 
- */ -struct page *find_lock_page(struct address_space *mapping, - unsigned long offset) -{ - struct page *page; - - write_lock(&mapping->page_lock); - page = __find_lock_page(mapping, offset); - write_unlock(&mapping->page_lock); + read_unlock(&mapping->page_lock); return page; } @@ -842,32 +786,25 @@ struct page *find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask) { - struct page *page; - + struct page *page, *cached_page = NULL; + int err; +repeat: page = find_lock_page(mapping, index); if (!page) { - struct page *newpage = alloc_page(gfp_mask); - if (newpage) { - write_lock(&mapping->page_lock); - page = __find_lock_page(mapping, index); - if (likely(!page)) { - page = newpage; - if (__add_to_page_cache(page, mapping, index)) { - write_unlock(&mapping->page_lock); - page_cache_release(page); - page = NULL; - goto out; - } - newpage = NULL; - } - write_unlock(&mapping->page_lock); - if (newpage == NULL) - lru_cache_add(page); - else - page_cache_release(newpage); + if (!cached_page) { + cached_page = alloc_page(gfp_mask); + if (!cached_page) + return NULL; } + err = add_to_page_cache(cached_page, mapping, index); + if (!err) { + page = cached_page; + cached_page = NULL; + } else if (err == -EEXIST) + goto repeat; } -out: + if (cached_page) + page_cache_release(cached_page); return page; } @@ -901,7 +838,7 @@ return NULL; } page = alloc_pages(mapping->gfp_mask & ~__GFP_FS, 0); - if (page && add_to_page_cache_unique(page, mapping, index)) { + if (page && add_to_page_cache(page, mapping, index)) { page_cache_release(page); page = NULL; } @@ -968,18 +905,16 @@ /* * Try to find the data in the page cache.. 
*/ - - write_lock(&mapping->page_lock); +find_page: + read_lock(&mapping->page_lock); page = radix_tree_lookup(&mapping->page_tree, index); if (!page) { - write_unlock(&mapping->page_lock); - handle_ra_thrashing(filp); - write_lock(&mapping->page_lock); + read_unlock(&mapping->page_lock); + handle_ra_miss(filp); goto no_cached_page; } -found_page: page_cache_get(page); - write_unlock(&mapping->page_lock); + read_unlock(&mapping->page_lock); if (!PageUptodate(page)) goto page_not_up_to_date; @@ -1059,40 +994,23 @@ /* * Ok, it wasn't cached, so we need to create a new * page.. - * - * We get here with the page cache lock held. */ if (!cached_page) { - write_unlock(&mapping->page_lock); cached_page = page_cache_alloc(mapping); if (!cached_page) { desc->error = -ENOMEM; break; } - - /* - * Somebody may have added the page while we - * dropped the page cache lock. Check for that. - */ - write_lock(&mapping->page_lock); - page = radix_tree_lookup(&mapping->page_tree, index); - if (page) - goto found_page; } - - /* - * Ok, add the new page to the hash-queues... 
- */ - if (__add_to_page_cache(cached_page, mapping, index) < 0) { - write_unlock(&mapping->page_lock); - desc->error = -ENOMEM; + error = add_to_page_cache(cached_page, mapping, index); + if (error) { + if (error == -EEXIST) + goto find_page; + desc->error = error; break; } page = cached_page; - write_unlock(&mapping->page_lock); - lru_cache_add(page); cached_page = NULL; - goto readpage; } @@ -1102,89 +1020,6 @@ UPDATE_ATIME(inode); } -#if 0 -static ssize_t generic_file_direct_IO(int rw, struct file * filp, char * buf, size_t count, loff_t offset) -{ - ssize_t retval; - int new_iobuf, chunk_size, blocksize_mask, blocksize, blocksize_bits, iosize, progress; - struct kiobuf * iobuf; - struct address_space * mapping = filp->f_dentry->d_inode->i_mapping; - struct inode * inode = mapping->host; - - new_iobuf = 0; - iobuf = filp->f_iobuf; - if (test_and_set_bit(0, &filp->f_iobuf_lock)) { - /* - * A parallel read/write is using the preallocated iobuf - * so just run slow and allocate a new one. - */ - retval = alloc_kiovec(1, &iobuf); - if (retval) - goto out; - new_iobuf = 1; - } - - blocksize = 1 << inode->i_blkbits; - blocksize_bits = inode->i_blkbits; - blocksize_mask = blocksize - 1; - chunk_size = KIO_MAX_ATOMIC_IO << 10; - - retval = -EINVAL; - if ((offset & blocksize_mask) || (count & blocksize_mask)) - goto out_free; - - /* - * Flush to disk exclusively the _data_, metadata must remain - * completly asynchronous or performance will go to /dev/null. 
- */ - retval = filemap_fdatawait(mapping); - if (retval == 0) - retval = filemap_fdatawrite(mapping); - if (retval == 0) - retval = filemap_fdatawait(mapping); - if (retval < 0) - goto out_free; - - progress = retval = 0; - while (count > 0) { - iosize = count; - if (iosize > chunk_size) - iosize = chunk_size; - - retval = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize); - if (retval) - break; - - retval = mapping->a_ops->direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize); - - if (rw == READ && retval > 0) - mark_dirty_kiobuf(iobuf, retval); - - if (retval >= 0) { - count -= retval; - buf += retval; - progress += retval; - } - - unmap_kiobuf(iobuf); - - if (retval != iosize) - break; - } - - if (progress) - retval = progress; - - out_free: - if (!new_iobuf) - clear_bit(0, &filp->f_iobuf_lock); - else - free_kiovec(1, &iobuf); - out: - return retval; -} -#endif - int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) { char *kaddr; @@ -1454,6 +1289,7 @@ struct inode *inode = mapping->host; struct page *page; unsigned long size, pgoff, endoff; + int did_readahead; pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; @@ -1467,31 +1303,45 @@ if ((pgoff >= size) && (area->vm_mm == current->mm)) return NULL; - /* The "size" of the file, as far as mmap is concerned, isn't bigger than the mapping */ + /* + * The "size" of the file, as far as mmap is concerned, isn't bigger + * than the mapping + */ if (size > endoff) size = endoff; + did_readahead = 0; + /* * The readahead code wants to be told about each and every page * so it can build and shrink its windows appropriately */ - if (VM_SequentialReadHint(area)) + if (VM_SequentialReadHint(area)) { + did_readahead = 1; page_cache_readahead(area->vm_file, pgoff); + } /* * If the offset is outside the mapping size we're off the end * of a 
privately mapped file, so we need to map a zero page. */ - if ((pgoff < size) && !VM_RandomReadHint(area)) + if ((pgoff < size) && !VM_RandomReadHint(area)) { + did_readahead = 1; page_cache_readaround(file, pgoff); + } /* * Do we have something in the page cache already? */ retry_find: page = find_get_page(mapping, pgoff); - if (!page) + if (!page) { + if (did_readahead) { + handle_ra_miss(file); + did_readahead = 0; + } goto no_cached_page; + } /* * Ok, found a page in the page cache, now we need to check @@ -1534,6 +1384,7 @@ return NULL; page_not_uptodate: + KERNEL_STAT_INC(pgmajfault); lock_page(page); /* Did it get unhashed while we waited for it? */ @@ -1957,7 +1808,7 @@ if (!cached_page) return ERR_PTR(-ENOMEM); } - err = add_to_page_cache_unique(cached_page, mapping, index); + err = add_to_page_cache(cached_page, mapping, index); if (err == -EEXIST) goto repeat; if (err < 0) { @@ -2030,7 +1881,7 @@ if (!*cached_page) return NULL; } - err = add_to_page_cache_unique(*cached_page, mapping, index); + err = add_to_page_cache(*cached_page, mapping, index); if (err == -EEXIST) goto repeat; if (err == 0) { diff -Nru a/mm/memory.c b/mm/memory.c --- a/mm/memory.c Sat Jul 20 12:12:35 2002 +++ b/mm/memory.c Sat Jul 20 12:12:35 2002 @@ -36,6 +36,7 @@ * (Gerhard.Wichert@pdb.siemens.de) */ +#include #include #include #include @@ -46,6 +47,7 @@ #include #include +#include #include #include #include @@ -79,7 +81,7 @@ */ static inline void free_one_pmd(mmu_gather_t *tlb, pmd_t * dir) { - struct page *pte; + struct page *page; if (pmd_none(*dir)) return; @@ -88,9 +90,10 @@ pmd_clear(dir); return; } - pte = pmd_page(*dir); + page = pmd_page(*dir); pmd_clear(dir); - pte_free_tlb(tlb, pte); + pgtable_remove_rmap(page); + pte_free_tlb(tlb, page); } static inline void free_one_pgd(mmu_gather_t *tlb, pgd_t * dir) @@ -150,6 +153,7 @@ pte_free(new); goto out; } + pgtable_add_rmap(new, mm, address); pmd_populate(mm, pmd, new); } out: @@ -177,6 +181,7 @@ pte_free_kernel(new); goto 
out; } + pgtable_add_rmap(virt_to_page(new), mm, address); pmd_populate_kernel(mm, pmd, new); } out: @@ -260,10 +265,13 @@ if (pte_none(pte)) goto cont_copy_pte_range_noset; + /* pte contains position in swap, so copy. */ if (!pte_present(pte)) { swap_duplicate(pte_to_swp_entry(pte)); - goto cont_copy_pte_range; + set_pte(dst_pte, pte); + goto cont_copy_pte_range_noset; } + ptepage = pte_page(pte); pfn = pte_pfn(pte); if (!pfn_valid(pfn)) goto cont_copy_pte_range; @@ -272,7 +280,7 @@ goto cont_copy_pte_range; /* If it's a COW mapping, write protect it both in the parent and the child */ - if (cow && pte_write(pte)) { + if (cow) { ptep_set_wrprotect(src_pte); pte = *src_pte; } @@ -285,6 +293,7 @@ dst->rss++; cont_copy_pte_range: set_pte(dst_pte, pte); + page_add_rmap(ptepage, dst_pte); cont_copy_pte_range_noset: address += PAGE_SIZE; if (address >= end) { pte_unmap_nested(src_pte); @@ -342,6 +351,7 @@ if (pte_dirty(pte)) set_page_dirty(page); tlb->freed++; + page_remove_rmap(page, ptep); tlb_remove_page(tlb, page); } } @@ -992,7 +1002,9 @@ if (pte_same(*page_table, pte)) { if (PageReserved(old_page)) ++mm->rss; + page_remove_rmap(old_page, page_table); break_cow(vma, new_page, address, page_table); + page_add_rmap(new_page, page_table); lru_cache_add(new_page); /* Free the old page.. */ @@ -1166,6 +1178,7 @@ /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; + KERNEL_STAT_INC(pgmajfault); } lock_page(page); @@ -1199,6 +1212,7 @@ flush_page_to_ram(page); flush_icache_page(vma, page); set_pte(page_table, pte); + page_add_rmap(page, page_table); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, pte); @@ -1215,14 +1229,13 @@ static int do_anonymous_page(struct mm_struct * mm, struct vm_area_struct * vma, pte_t *page_table, pmd_t *pmd, int write_access, unsigned long addr) { pte_t entry; + struct page * page = ZERO_PAGE(addr); /* Read-only mapping of ZERO_PAGE. 
*/ entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot)); /* ..except if it's a write access */ if (write_access) { - struct page *page; - /* Allocate our own private page. */ pte_unmap(page_table); spin_unlock(&mm->page_table_lock); @@ -1248,6 +1261,7 @@ } set_pte(page_table, entry); + page_add_rmap(page, page_table); /* ignores ZERO_PAGE */ pte_unmap(page_table); /* No need to invalidate - it was non-present before */ @@ -1327,6 +1341,7 @@ if (write_access) entry = pte_mkwrite(pte_mkdirty(entry)); set_pte(page_table, entry); + page_add_rmap(new_page, page_table); pte_unmap(page_table); } else { /* One of our sibling threads was faster, back out. */ @@ -1406,6 +1421,7 @@ current->state = TASK_RUNNING; pgd = pgd_offset(mm, address); + KERNEL_STAT_INC(pgfault); /* * We need the page table lock to synchronize with kswapd * and the SMP-safe atomic PTE updates. diff -Nru a/mm/mremap.c b/mm/mremap.c --- a/mm/mremap.c Sat Jul 20 12:12:35 2002 +++ b/mm/mremap.c Sat Jul 20 12:12:35 2002 @@ -68,8 +68,14 @@ { int error = 0; pte_t pte; + struct page * page = NULL; + + if (pte_present(*src)) + page = pte_page(*src); if (!pte_none(*src)) { + if (page) + page_remove_rmap(page, src); pte = ptep_get_and_clear(src); if (!dst) { /* No dest? We must put it back. */ @@ -77,6 +83,8 @@ error++; } set_pte(dst, pte); + if (page) + page_add_rmap(page, dst); } return error; } diff -Nru a/mm/page-writeback.c b/mm/page-writeback.c --- a/mm/page-writeback.c Sat Jul 20 12:12:34 2002 +++ b/mm/page-writeback.c Sat Jul 20 12:12:34 2002 @@ -19,7 +19,7 @@ #include #include #include -//#include +#include #include #include @@ -37,7 +37,7 @@ * will look to see if it needs to force writeback or throttling. Probably * should be scaled by memory size. 
*/ -#define RATELIMIT_PAGES 1000 +#define RATELIMIT_PAGES ((512 * 1024) / PAGE_SIZE) /* * When balance_dirty_pages decides that the caller needs to perform some @@ -45,7 +45,7 @@ * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably * large amounts of I/O are submitted. */ -#define SYNC_WRITEBACK_PAGES 1500 +#define SYNC_WRITEBACK_PAGES ((RATELIMIT_PAGES * 3) / 2) /* The following parameters are exported via /proc/sys/vm */ @@ -108,6 +108,7 @@ struct page_state ps; int background_thresh, async_thresh, sync_thresh; unsigned long dirty_and_writeback; + struct backing_dev_info *bdi; get_page_state(&ps); dirty_and_writeback = ps.nr_dirty + ps.nr_writeback; @@ -115,21 +116,21 @@ background_thresh = (dirty_background_ratio * tot) / 100; async_thresh = (dirty_async_ratio * tot) / 100; sync_thresh = (dirty_sync_ratio * tot) / 100; + bdi = mapping->backing_dev_info; if (dirty_and_writeback > sync_thresh) { int nr_to_write = SYNC_WRITEBACK_PAGES; - writeback_unlocked_inodes(&nr_to_write, WB_SYNC_LAST, NULL); + writeback_backing_dev(bdi, &nr_to_write, WB_SYNC_LAST, NULL); get_page_state(&ps); } else if (dirty_and_writeback > async_thresh) { int nr_to_write = SYNC_WRITEBACK_PAGES; - writeback_unlocked_inodes(&nr_to_write, WB_SYNC_NONE, NULL); + writeback_backing_dev(bdi, &nr_to_write, WB_SYNC_NONE, NULL); get_page_state(&ps); } - if (!writeback_in_progress(mapping->backing_dev_info) && - ps.nr_dirty > background_thresh) + if (!writeback_in_progress(bdi) && ps.nr_dirty > background_thresh) pdflush_operation(background_writeout, 0); } @@ -171,6 +172,8 @@ long min_pages = _min_pages; int nr_to_write; + CHECK_EMERGENCY_SYNC + do { struct page_state ps; @@ -313,12 +316,6 @@ return 0; } EXPORT_SYMBOL(generic_vm_writeback); - -int generic_writepages(struct address_space *mapping, int *nr_to_write) -{ - return mpage_writepages(mapping, nr_to_write, NULL); -} -EXPORT_SYMBOL(generic_writepages); int do_writepages(struct address_space *mapping, int *nr_to_write) { 
diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c --- a/mm/page_alloc.c Sat Jul 20 12:12:34 2002 +++ b/mm/page_alloc.c Sat Jul 20 12:12:34 2002 @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -86,12 +87,15 @@ struct page *base; zone_t *zone; + KERNEL_STAT_ADD(pgfree, 1<mapping != NULL); BUG_ON(PageLocked(page)); BUG_ON(PageLRU(page)); BUG_ON(PageActive(page)); BUG_ON(PageWriteback(page)); + BUG_ON(page->pte.chain != NULL); if (PageDirty(page)) ClearPageDirty(page); BUG_ON(page_count(page) != 0); @@ -319,20 +323,23 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist) { unsigned long min; - zone_t **zone, * classzone; + zone_t **zones, *classzone; struct page * page; - int freed; + int freed, i; + + KERNEL_STAT_ADD(pgalloc, 1<zones; - classzone = *zone; - if (classzone == NULL) + zones = zonelist->zones; /* the list of zones suitable for gfp_mask */ + classzone = zones[0]; + if (classzone == NULL) /* no zones in the zonelist */ return NULL; + + /* Go through the zonelist once, looking for a zone with enough free */ min = 1UL << order; - for (;;) { - zone_t *z = *(zone++); - if (!z) - break; + for (i = 0; zones[i] != NULL; i++) { + zone_t *z = zones[i]; + /* the incremental min is allegedly to discourage fallback */ min += z->pages_low; if (z->free_pages > min) { page = rmqueue(z, order); @@ -343,16 +350,15 @@ classzone->need_balance = 1; mb(); + /* we're somewhat low on memory, failed to find what we needed */ if (waitqueue_active(&kswapd_wait)) wake_up_interruptible(&kswapd_wait); - zone = zonelist->zones; + /* Go through the zonelist again, taking __GFP_HIGH into account */ min = 1UL << order; - for (;;) { + for (i = 0; zones[i] != NULL; i++) { unsigned long local_min; - zone_t *z = *(zone++); - if (!z) - break; + zone_t *z = zones[i]; local_min = z->pages_min; if (gfp_mask & __GFP_HIGH) @@ -369,11 +375,9 @@ rebalance: if (current->flags & (PF_MEMALLOC | PF_MEMDIE)) { - zone = zonelist->zones; - for (;;) 
{ - zone_t *z = *(zone++); - if (!z) - break; + /* go through the zonelist yet again, ignoring mins */ + for (i = 0; zones[i] != NULL; i++) { + zone_t *z = zones[i]; page = rmqueue(z, order); if (page) @@ -392,16 +396,15 @@ if (!(gfp_mask & __GFP_WAIT)) goto nopage; + KERNEL_STAT_INC(allocstall); page = balance_classzone(classzone, gfp_mask, order, &freed); if (page) return page; - zone = zonelist->zones; + /* go through the zonelist yet one more time */ min = 1UL << order; - for (;;) { - zone_t *z = *(zone++); - if (!z) - break; + for (i = 0; zones[i] != NULL; i++) { + zone_t *z = zones[i]; min += z->pages_min; if (z->free_pages > min) { @@ -562,6 +565,9 @@ ret->nr_pagecache += ps->nr_pagecache; ret->nr_active += ps->nr_active; ret->nr_inactive += ps->nr_inactive; + ret->nr_page_table_pages += ps->nr_page_table_pages; + ret->nr_pte_chain_pages += ps->nr_pte_chain_pages; + ret->used_pte_chains_bytes += ps->used_pte_chains_bytes; } } diff -Nru a/mm/page_io.c b/mm/page_io.c --- a/mm/page_io.c Sat Jul 20 12:12:34 2002 +++ b/mm/page_io.c Sat Jul 20 12:12:34 2002 @@ -17,6 +17,7 @@ #include #include #include /* for block_sync_page() */ +#include #include static struct bio * diff -Nru a/mm/readahead.c b/mm/readahead.c --- a/mm/readahead.c Sat Jul 20 12:12:34 2002 +++ b/mm/readahead.c Sat Jul 20 12:12:34 2002 @@ -43,7 +43,7 @@ for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_entry(pages->prev, struct page, list); list_del(&page->list); - if (!add_to_page_cache_unique(page, mapping, page->index)) + if (!add_to_page_cache(page, mapping, page->index)) mapping->a_ops->readpage(file, page); page_cache_release(page); } @@ -61,6 +61,7 @@ * Together, these form the "current window". * Together, start and size represent the `readahead window'. * next_size: The number of pages to read on the next readahead miss. + * Has the magical value -1UL if readahead has been disabled. * prev_page: The page which the readahead algorithm most-recently inspected. 
* prev_page is mainly an optimisation: if page_cache_readahead * sees that it is again being called for a page which it just @@ -68,6 +69,7 @@ * changes. * ahead_start, * ahead_size: Together, these form the "ahead window". + * ra_pages: The externally controlled max readahead for this fd. * * The readahead code manages two windows - the "current" and the "ahead" * windows. The intent is that while the application is walking the pages @@ -120,8 +122,10 @@ * the pages first, then submits them all for I/O. This avoids the very bad * behaviour which would occur if page allocations are causing VM writeback. * We really don't want to intermingle reads and writes like that. + * + * Returns the number of pages which actually had IO started against them. */ -void do_page_cache_readahead(struct file *file, +int do_page_cache_readahead(struct file *file, unsigned long offset, unsigned long nr_to_read) { struct address_space *mapping = file->f_dentry->d_inode->i_mapping; @@ -130,10 +134,10 @@ unsigned long end_index; /* The last page we want to read */ LIST_HEAD(page_pool); int page_idx; - int nr_to_really_read = 0; + int ret = 0; if (inode->i_size == 0) - return; + goto out; end_index = ((inode->i_size - 1) >> PAGE_CACHE_SHIFT); @@ -158,7 +162,7 @@ break; page->index = page_offset; list_add(&page->list, &page_pool); - nr_to_really_read++; + ret++; } read_unlock(&mapping->page_lock); @@ -167,10 +171,36 @@ * uptodate then the caller will launch readpage again, and * will then handle the error. */ - read_pages(file, mapping, &page_pool, nr_to_really_read); - blk_run_queues(); + if (ret) { + read_pages(file, mapping, &page_pool, ret); + blk_run_queues(); + } BUG_ON(!list_empty(&page_pool)); - return; +out: + return ret; +} + +/* + * Check how effective readahead is being. If the amount of started IO is + * less than expected then the file is partly or fully in pagecache and + * readahead isn't helping. Shrink the window. 
+ * + * But don't shrink it too much - the application may read the same page + * occasionally. + */ +static inline void +check_ra_success(struct file_ra_state *ra, pgoff_t attempt, + pgoff_t actual, pgoff_t orig_next_size) +{ + if (actual == 0) { + if (orig_next_size > 1) { + ra->next_size = orig_next_size - 1; + if (ra->ahead_size) + ra->ahead_size = ra->next_size; + } else { + ra->next_size = -1UL; + } + } } /* @@ -180,25 +210,32 @@ void page_cache_readahead(struct file *file, unsigned long offset) { struct file_ra_state *ra = &file->f_ra; - unsigned long max; - unsigned long min; + unsigned max; + unsigned min; + unsigned orig_next_size; + unsigned actual; /* * Here we detect the case where the application is performing * sub-page sized reads. We avoid doing extra work and bogusly * perturbing the readahead window expansion logic. * If next_size is zero, this is the very first read for this - * file handle. + * file handle, or the window is maximally shrunk. */ if (offset == ra->prev_page) { if (ra->next_size != 0) goto out; } + if (ra->next_size == -1UL) + goto out; /* Maximally shrunk */ + max = get_max_readahead(file); if (max == 0) goto out; /* No readahead */ + min = get_min_readahead(file); + orig_next_size = ra->next_size; if (ra->next_size == 0 && offset == 0) { /* @@ -224,8 +261,6 @@ * window by 25%. */ ra->next_size -= ra->next_size / 4; - if (ra->next_size < min) - ra->next_size = min; } if (ra->next_size > max) @@ -272,19 +307,21 @@ ra->ahead_start = 0; /* Invalidate these */ ra->ahead_size = 0; - do_page_cache_readahead(file, offset, ra->size); + actual = do_page_cache_readahead(file, offset, ra->size); + check_ra_success(ra, ra->size, actual, orig_next_size); } else { /* - * This read request is within the current window. It - * is time to submit I/O for the ahead window while - * the application is crunching through the current - * window. + * This read request is within the current window. 
It is time + * to submit I/O for the ahead window while the application is + * crunching through the current window. */ if (ra->ahead_start == 0) { ra->ahead_start = ra->start + ra->size; ra->ahead_size = ra->next_size; - do_page_cache_readahead(file, + actual = do_page_cache_readahead(file, ra->ahead_start, ra->ahead_size); + check_ra_success(ra, ra->ahead_size, + actual, orig_next_size); } } out: @@ -298,38 +335,55 @@ */ void page_cache_readaround(struct file *file, unsigned long offset) { - const unsigned long min = get_min_readahead(file) * 2; - unsigned long target; - unsigned long backward; - - if (file->f_ra.next_size < min) - file->f_ra.next_size = min; - - target = offset; - backward = file->f_ra.next_size / 4; - - if (backward > target) - target = 0; - else - target -= backward; - page_cache_readahead(file, target); + struct file_ra_state *ra = &file->f_ra; + + if (ra->next_size != -1UL) { + const unsigned long min = get_min_readahead(file) * 2; + unsigned long target; + unsigned long backward; + + /* + * If next_size is zero then leave it alone, because that's a + * readahead startup state. + */ + if (ra->next_size && ra->next_size < min) + ra->next_size = min; + + target = offset; + backward = ra->next_size / 4; + + if (backward > target) + target = 0; + else + target -= backward; + page_cache_readahead(file, target); + } } /* - * handle_ra_thrashing() is called when it is known that a page which should - * have been present (it's inside the readahead window) was in fact evicted by - * the VM. - * - * We shrink the readahead window by three pages. This is because we grow it - * by two pages on a readahead hit. Theory being that the readahead window - * size will stabilise around the maximum level at which there isn't any - * thrashing. + * handle_ra_miss() is called when it is known that a page which should have + * been present in the pagecache (we just did some readahead there) was in fact + * not found. 
This will happen if it was evicted by the VM (readahead + * thrashing) or if the readahead window is maximally shrunk. + * + * If the window has been maximally shrunk (next_size == 0) then bump it up + * again to resume readahead. + * + * Otherwise we're thrashing, so shrink the readahead window by three pages. + * This is because it is grown by two pages on a readahead hit. Theory being + * that the readahead window size will stabilise around the maximum level at + * which there is no thrashing. */ -void handle_ra_thrashing(struct file *file) +void handle_ra_miss(struct file *file) { + struct file_ra_state *ra = &file->f_ra; const unsigned long min = get_min_readahead(file); - file->f_ra.next_size -= 3; - if (file->f_ra.next_size < min) - file->f_ra.next_size = min; + if (ra->next_size == -1UL) { + ra->next_size = min; + } else { + ra->next_size -= 3; + if (ra->next_size < min) + ra->next_size = min; + } } diff -Nru a/mm/rmap.c b/mm/rmap.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/mm/rmap.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,455 @@ +/* + * mm/rmap.c - physical to virtual reverse mappings + * + * Copyright 2001, Rik van Riel + * Released under the General Public License (GPL). + * + * + * Simple, low overhead pte-based reverse mapping scheme. + * This is kept modular because we may want to experiment + * with object-based reverse mapping schemes. Please try + * to keep this thing as modular as possible. + */ + +/* + * Locking: + * - the page->pte.chain is protected by the PG_chainlock bit, + * which nests within the pagemap_lru_lock, then the + * mm->page_table_lock, and then the page lock. 
+ * - because swapout locking is opposite to the locking order + * in the page fault path, the swapout path uses trylocks + * on the mm->page_table_lock + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +/* #define DEBUG_RMAP */ + +/* + * Shared pages have a chain of pte_chain structures, used to locate + * all the mappings to this page. We only need a pointer to the pte + * here, the page struct for the page table page contains the process + * it belongs to and the offset within that process. + * + * A singly linked list should be fine for most, if not all, workloads. + * On fork-after-exec the mapping we'll be removing will still be near + * the start of the list, on mixed application systems the short-lived + * processes will have their mappings near the start of the list and + * in systems with long-lived applications the relative overhead of + * exit() will be lower since the applications are long-lived. + */ +struct pte_chain { + struct pte_chain * next; + pte_t * ptep; +}; + +static inline struct pte_chain * pte_chain_alloc(void); +static inline void pte_chain_free(struct pte_chain *, struct pte_chain *, + struct page *); +static void alloc_new_pte_chains(void); + +/** + * page_referenced - test if the page was referenced + * @page: the page to test + * + * Quick test_and_clear_referenced for all mappings to a page, + * returns the number of processes which referenced the page. + * Caller needs to hold the pte_chain_lock. + */ +int page_referenced(struct page * page) +{ + struct pte_chain * pc; + int referenced = 0; + + if (TestClearPageReferenced(page)) + referenced++; + + if (PageDirect(page)) { + if (ptep_test_and_clear_young(page->pte.direct)) + referenced++; + } else { + /* Check all the page tables mapping this page. 
*/ + for (pc = page->pte.chain; pc; pc = pc->next) { + if (ptep_test_and_clear_young(pc->ptep)) + referenced++; + } + } + return referenced; +} + +/** + * page_add_rmap - add reverse mapping entry to a page + * @page: the page to add the mapping to + * @ptep: the page table entry mapping this page + * + * Add a new pte reverse mapping to a page. + * The caller needs to hold the mm->page_table_lock. + */ +void page_add_rmap(struct page * page, pte_t * ptep) +{ + struct pte_chain * pte_chain; + unsigned long pfn = pte_pfn(*ptep); + +#ifdef DEBUG_RMAP + if (!page || !ptep) + BUG(); + if (!pte_present(*ptep)) + BUG(); + if (!ptep_to_mm(ptep)) + BUG(); +#endif + + if (!pfn_valid(pfn) || PageReserved(page)) + return; + +#ifdef DEBUG_RMAP + pte_chain_lock(page); + { + struct pte_chain * pc; + if (PageDirect(page)) { + if (page->pte.direct == ptep) + BUG(); + } else { + for (pc = page->pte.chain; pc; pc = pc->next) { + if (pc->ptep == ptep) + BUG(); + } + } + } + pte_chain_unlock(page); +#endif + + pte_chain_lock(page); + + if (PageDirect(page)) { + /* Convert a direct pointer into a pte_chain */ + pte_chain = pte_chain_alloc(); + pte_chain->ptep = page->pte.direct; + pte_chain->next = NULL; + page->pte.chain = pte_chain; + ClearPageDirect(page); + } + if (page->pte.chain) { + /* Hook up the pte_chain to the page. */ + pte_chain = pte_chain_alloc(); + pte_chain->ptep = ptep; + pte_chain->next = page->pte.chain; + page->pte.chain = pte_chain; + } else { + page->pte.direct = ptep; + SetPageDirect(page); + } + + pte_chain_unlock(page); +} + +/** + * page_remove_rmap - take down reverse mapping to a page + * @page: page to remove mapping from + * @ptep: page table entry to remove + * + * Removes the reverse mapping from the pte_chain of the page, + * after that the caller can clear the page table entry and free + * the page. + * Caller needs to hold the mm->page_table_lock. 
+ */ +void page_remove_rmap(struct page * page, pte_t * ptep) +{ + struct pte_chain * pc, * prev_pc = NULL; + unsigned long pfn = pte_pfn(*ptep); + + if (!page || !ptep) + BUG(); + if (!pfn_valid(pfn) || PageReserved(page)) + return; + + pte_chain_lock(page); + + if (PageDirect(page)) { + if (page->pte.direct == ptep) { + page->pte.direct = NULL; + ClearPageDirect(page); + goto out; + } + } else { + for (pc = page->pte.chain; pc; prev_pc = pc, pc = pc->next) { + if (pc->ptep == ptep) { + pte_chain_free(pc, prev_pc, page); + /* Check whether we can convert to direct */ + pc = page->pte.chain; + if (!pc->next) { + page->pte.direct = pc->ptep; + SetPageDirect(page); + pte_chain_free(pc, NULL, NULL); + } + goto out; + } + } + } +#ifdef DEBUG_RMAP + /* Not found. This should NEVER happen! */ + printk(KERN_ERR "page_remove_rmap: pte_chain %p not present.\n", ptep); + printk(KERN_ERR "page_remove_rmap: only found: "); + if (PageDirect(page)) { + printk("%p ", page->pte.direct); + } else { + for (pc = page->pte.chain; pc; pc = pc->next) + printk("%p ", pc->ptep); + } + printk("\n"); + printk(KERN_ERR "page_remove_rmap: driver cleared PG_reserved ?\n"); +#endif + +out: + pte_chain_unlock(page); + return; + +} + +/** + * try_to_unmap_one - worker function for try_to_unmap + * @page: page to unmap + * @ptep: page table entry to unmap from page + * + * Internal helper function for try_to_unmap, called for each page + * table entry mapping a page. Because locking order here is opposite + * to the locking order used by the page fault path, we use trylocks. 
+ * Locking: + * pagemap_lru_lock page_launder() + * page lock page_launder(), trylock + * pte_chain_lock page_launder() + * mm->page_table_lock try_to_unmap_one(), trylock + */ +static int FASTCALL(try_to_unmap_one(struct page *, pte_t *)); +static int try_to_unmap_one(struct page * page, pte_t * ptep) +{ + unsigned long address = ptep_to_address(ptep); + struct mm_struct * mm = ptep_to_mm(ptep); + struct vm_area_struct * vma; + pte_t pte; + int ret; + + if (!mm) + BUG(); + + /* + * We need the page_table_lock to protect us from page faults, + * munmap, fork, etc... + */ + if (!spin_trylock(&mm->page_table_lock)) + return SWAP_AGAIN; + + /* During mremap, it's possible pages are not in a VMA. */ + vma = find_vma(mm, address); + if (!vma) { + ret = SWAP_FAIL; + goto out_unlock; + } + + /* The page is mlock()d, we cannot swap it out. */ + if (vma->vm_flags & VM_LOCKED) { + ret = SWAP_FAIL; + goto out_unlock; + } + + /* Nuke the page table entry. */ + pte = ptep_get_and_clear(ptep); + flush_tlb_page(vma, address); + flush_cache_page(vma, address); + + /* Store the swap location in the pte. See handle_pte_fault() ... */ + if (PageSwapCache(page)) { + swp_entry_t entry; + entry.val = page->index; + swap_duplicate(entry); + set_pte(ptep, swp_entry_to_pte(entry)); + } + + /* Move the dirty bit to the physical page now the pte is gone. */ + if (pte_dirty(pte)) + set_page_dirty(page); + + mm->rss--; + page_cache_release(page); + ret = SWAP_SUCCESS; + +out_unlock: + spin_unlock(&mm->page_table_lock); + return ret; +} + +/** + * try_to_unmap - try to remove all page table mappings to a page + * @page: the page to get unmapped + * + * Tries to remove all the page table entries which are mapping this + * page, used in the pageout path. Caller must hold pagemap_lru_lock + * and the page lock. 
Return values are: + * + * SWAP_SUCCESS - we succeeded in removing all mappings + * SWAP_AGAIN - we missed a trylock, try again later + * SWAP_FAIL - the page is unswappable + * SWAP_ERROR - an error occurred + */ +int try_to_unmap(struct page * page) +{ + struct pte_chain * pc, * next_pc, * prev_pc = NULL; + int ret = SWAP_SUCCESS; + + /* This page should not be on the pageout lists. */ + if (PageReserved(page)) + BUG(); + if (!PageLocked(page)) + BUG(); + /* We need backing store to swap out a page. */ + if (!page->mapping) + BUG(); + + if (PageDirect(page)) { + ret = try_to_unmap_one(page, page->pte.direct); + if (ret == SWAP_SUCCESS) { + page->pte.direct = NULL; + ClearPageDirect(page); + } + } else { + for (pc = page->pte.chain; pc; pc = next_pc) { + next_pc = pc->next; + switch (try_to_unmap_one(page, pc->ptep)) { + case SWAP_SUCCESS: + /* Free the pte_chain struct. */ + pte_chain_free(pc, prev_pc, page); + break; + case SWAP_AGAIN: + /* Skip this pte, remembering status. */ + prev_pc = pc; + ret = SWAP_AGAIN; + continue; + case SWAP_FAIL: + ret = SWAP_FAIL; + break; + case SWAP_ERROR: + ret = SWAP_ERROR; + break; + } + } + /* Check whether we can convert to direct pte pointer */ + pc = page->pte.chain; + if (pc && !pc->next) { + page->pte.direct = pc->ptep; + SetPageDirect(page); + pte_chain_free(pc, NULL, NULL); + } + } + return ret; +} + +/** + ** No more VM stuff below this comment, only pte_chain helper + ** functions. + **/ + +struct pte_chain * pte_chain_freelist; +spinlock_t pte_chain_freelist_lock = SPIN_LOCK_UNLOCKED; + +/* Maybe we should have standard ops for singly linked lists ... 
- Rik */ +static inline void pte_chain_push(struct pte_chain * pte_chain) +{ + pte_chain->ptep = NULL; + pte_chain->next = pte_chain_freelist; + pte_chain_freelist = pte_chain; +} + +static inline struct pte_chain * pte_chain_pop(void) +{ + struct pte_chain *pte_chain; + + pte_chain = pte_chain_freelist; + pte_chain_freelist = pte_chain->next; + pte_chain->next = NULL; + + return pte_chain; +} + +/** + * pte_chain_free - free pte_chain structure + * @pte_chain: pte_chain struct to free + * @prev_pte_chain: previous pte_chain on the list (may be NULL) + * @page: page this pte_chain hangs off (may be NULL) + * + * This function unlinks pte_chain from the singly linked list it + * may be on and adds the pte_chain to the free list. May also be + * called for new pte_chain structures which aren't on any list yet. + * Caller needs to hold the pte_chain_lock if the page is non-NULL. + */ +static inline void pte_chain_free(struct pte_chain * pte_chain, + struct pte_chain * prev_pte_chain, struct page * page) +{ + mod_page_state(used_pte_chains_bytes, -sizeof(struct pte_chain)); + if (prev_pte_chain) + prev_pte_chain->next = pte_chain->next; + else if (page) + page->pte.chain = pte_chain->next; + + spin_lock(&pte_chain_freelist_lock); + pte_chain_push(pte_chain); + spin_unlock(&pte_chain_freelist_lock); +} + +/** + * pte_chain_alloc - allocate a pte_chain struct + * + * Returns a pointer to a fresh pte_chain structure. Allocates new + * pte_chain structures as required. + * Caller needs to hold the page's pte_chain_lock. + */ +static inline struct pte_chain * pte_chain_alloc() +{ + struct pte_chain * pte_chain; + + spin_lock(&pte_chain_freelist_lock); + + /* Allocate new pte_chain structs as needed. */ + if (!pte_chain_freelist) + alloc_new_pte_chains(); + + /* Grab the first pte_chain from the freelist. 
*/ + pte_chain = pte_chain_pop(); + + spin_unlock(&pte_chain_freelist_lock); + + mod_page_state(used_pte_chains_bytes, sizeof(struct pte_chain)); + return pte_chain; +} + +/** + * alloc_new_pte_chains - convert a free page to pte_chain structures + * + * Grabs a free page and converts it to pte_chain structures. We really + * should pre-allocate these earlier in the pagefault path or come up + * with some other trick. + * + * Note that we cannot use the slab cache because the pte_chain structure + * is way smaller than the minimum size of a slab cache allocation. + * Caller needs to hold the pte_chain_freelist_lock + */ +static void alloc_new_pte_chains() +{ + struct pte_chain * pte_chain = (void *) get_zeroed_page(GFP_ATOMIC); + int i = PAGE_SIZE / sizeof(struct pte_chain); + + if (pte_chain) { + inc_page_state(nr_pte_chain_pages); + for (; i-- > 0; pte_chain++) + pte_chain_push(pte_chain); + } else { + /* Yeah yeah, I'll fix the pte_chain allocation ... */ + panic("Fix pte_chain allocation, you lazy bastard!\n"); + } +} diff -Nru a/mm/swap.c b/mm/swap.c --- a/mm/swap.c Sat Jul 20 12:12:35 2002 +++ b/mm/swap.c Sat Jul 20 12:12:35 2002 @@ -41,6 +41,7 @@ if (PageLRU(page) && !PageActive(page)) { del_page_from_inactive_list(page); add_page_to_active_list(page); + KERNEL_STAT_INC(pgactivate); } } diff -Nru a/mm/swap_state.c b/mm/swap_state.c --- a/mm/swap_state.c Sat Jul 20 12:12:35 2002 +++ b/mm/swap_state.c Sat Jul 20 12:12:35 2002 @@ -75,8 +75,7 @@ INC_CACHE_INFO(noent_race); return -ENOENT; } - - error = add_to_page_cache_unique(page, &swapper_space, entry.val); + error = add_to_page_cache(page, &swapper_space, entry.val); if (error != 0) { swap_free(entry); if (error == -EEXIST) @@ -103,6 +102,69 @@ ClearPageDirty(page); __remove_inode_page(page); INC_CACHE_INFO(del_total); +} + +/** + * add_to_swap - allocate swap space for a page + * @page: page we want to move to swap + * + * Allocate swap space for the page and add the page to the + * swap cache. 
Caller needs to hold the page lock. + */ +int add_to_swap(struct page * page) +{ + swp_entry_t entry; + int flags; + + if (!PageLocked(page)) + BUG(); + + for (;;) { + entry = get_swap_page(); + if (!entry.val) + return 0; + + /* Radix-tree node allocations are performing + * GFP_ATOMIC allocations under PF_MEMALLOC. + * They can completely exhaust the page allocator. + * + * So PF_MEMALLOC is dropped here. This causes the slab + * allocations to fail earlier, so radix-tree nodes will + * then be allocated from the mempool reserves. + * + * We're still using __GFP_HIGH for radix-tree node + * allocations, so some of the emergency pools are available, + * just not all of them. + */ + + flags = current->flags; + current->flags &= ~PF_MEMALLOC; + current->flags |= PF_NOWARN; + ClearPageUptodate(page); /* why? */ + + /* + * Add it to the swap cache and mark it dirty + * (adding to the page cache will clear the dirty + * and uptodate bits, so we need to do it again) + */ + switch (add_to_swap_cache(page, entry)) { + case 0: /* Success */ + current->flags = flags; + SetPageUptodate(page); + set_page_dirty(page); + swap_free(entry); + return 1; + case -ENOMEM: /* radix-tree allocation */ + current->flags = flags; + swap_free(entry); + return 0; + default: /* ENOENT: raced */ + break; + } + /* Raced with "speculative" read_swap_cache_async */ + current->flags = flags; + swap_free(entry); + } } /* diff -Nru a/mm/swapfile.c b/mm/swapfile.c --- a/mm/swapfile.c Sat Jul 20 12:12:35 2002 +++ b/mm/swapfile.c Sat Jul 20 12:12:35 2002 @@ -383,6 +383,7 @@ return; get_page(page); set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot))); + page_add_rmap(page, dir); swap_free(entry); ++vma->vm_mm->rss; } diff -Nru a/mm/vmscan.c b/mm/vmscan.c --- a/mm/vmscan.c Sat Jul 20 12:12:34 2002 +++ b/mm/vmscan.c Sat Jul 20 12:12:34 2002 @@ -42,348 +42,24 @@ return page_count(page) - !!PagePrivate(page) == 1; } -/* - * On the swap_out path, the radix-tree node allocations are performing - * 
GFP_ATOMIC allocations under PF_MEMALLOC. They can completely - * exhaust the page allocator. This is bad; some pages should be left - * available for the I/O system to start sending the swapcache contents - * to disk. - * - * So PF_MEMALLOC is dropped here. This causes the slab allocations to fail - * earlier, so radix-tree nodes will then be allocated from the mempool - * reserves. - * - * We're still using __GFP_HIGH for radix-tree node allocations, so some of - * the emergency pools are available - just not all of them. - */ -static inline int -swap_out_add_to_swap_cache(struct page *page, swp_entry_t entry) +/* Must be called with page's pte_chain_lock held. */ +static inline int page_mapping_inuse(struct page * page) { - int flags = current->flags; - int ret; - - current->flags &= ~PF_MEMALLOC; - current->flags |= PF_NOWARN; - ClearPageUptodate(page); /* why? */ - ClearPageReferenced(page); /* why? */ - ret = add_to_swap_cache(page, entry); - current->flags = flags; - return ret; -} - -/* - * The swap-out function returns 1 if it successfully - * scanned all the pages it was asked to (`count'). - * It returns zero if it couldn't do anything, - * - * rss may decrease because pages are shared, but this - * doesn't count as having freed a page. - */ - -/* mm->page_table_lock is held. mmap_sem is not held */ -static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone) -{ - pte_t pte; - swp_entry_t entry; - - /* Don't look at this pte if it's been accessed recently. */ - if ((vma->vm_flags & VM_LOCKED) || ptep_test_and_clear_young(page_table)) { - mark_page_accessed(page); - return 0; - } - - /* Don't bother unmapping pages that are active */ - if (PageActive(page)) - return 0; + struct address_space *mapping = page->mapping; - /* Don't bother replenishing zones not under pressure.. 
*/ - if (!memclass(page_zone(page), classzone)) - return 0; + /* Page is in somebody's page tables. */ + if (page->pte.chain) + return 1; - if (TestSetPageLocked(page)) + /* XXX: does this happen ? */ + if (!mapping) return 0; - if (PageWriteback(page)) - goto out_unlock; - - /* From this point on, the odds are that we're going to - * nuke this pte, so read and clear the pte. This hook - * is needed on CPUs which update the accessed and dirty - * bits in hardware. - */ - flush_cache_page(vma, address); - pte = ptep_get_and_clear(page_table); - flush_tlb_page(vma, address); - - if (pte_dirty(pte)) - set_page_dirty(page); - - /* - * Is the page already in the swap cache? If so, then - * we can just drop our reference to it without doing - * any IO - it's already up-to-date on disk. - */ - if (PageSwapCache(page)) { - entry.val = page->index; - swap_duplicate(entry); -set_swap_pte: - set_pte(page_table, swp_entry_to_pte(entry)); -drop_pte: - mm->rss--; - unlock_page(page); - { - int freeable = page_count(page) - - !!PagePrivate(page) <= 2; - page_cache_release(page); - return freeable; - } - } - - /* - * Is it a clean page? Then it must be recoverable - * by just paging it in again, and we can just drop - * it.. or if it's dirty but has backing store, - * just mark the page dirty and drop it. - * - * However, this won't actually free any real - * memory, as the page will just be in the page cache - * somewhere, and as such we should just continue - * our scan. - * - * Basically, this just makes it possible for us to do - * some real work in the future in "refill_inactive()". - */ - if (page->mapping) - goto drop_pte; - if (!PageDirty(page)) - goto drop_pte; - - /* - * Anonymous buffercache pages can be left behind by - * concurrent truncate and pagefault. - */ - if (PagePrivate(page)) - goto preserve; - - /* - * This is a dirty, swappable page. 
First of all, - * get a suitable swap entry for it, and make sure - * we have the swap cache set up to associate the - * page with that swap entry. - */ - for (;;) { - entry = get_swap_page(); - if (!entry.val) - break; - /* Add it to the swap cache and mark it dirty - * (adding to the page cache will clear the dirty - * and uptodate bits, so we need to do it again) - */ - switch (swap_out_add_to_swap_cache(page, entry)) { - case 0: /* Success */ - SetPageUptodate(page); - set_page_dirty(page); - goto set_swap_pte; - case -ENOMEM: /* radix-tree allocation */ - swap_free(entry); - goto preserve; - default: /* ENOENT: raced */ - break; - } - /* Raced with "speculative" read_swap_cache_async */ - swap_free(entry); - } - - /* No swap space left */ -preserve: - set_pte(page_table, pte); -out_unlock: - unlock_page(page); - return 0; -} - -/* mm->page_table_lock is held. mmap_sem is not held */ -static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone) -{ - pte_t * pte; - unsigned long pmd_end; - - if (pmd_none(*dir)) - return count; - if (pmd_bad(*dir)) { - pmd_ERROR(*dir); - pmd_clear(dir); - return count; - } - - pte = pte_offset_map(dir, address); - - pmd_end = (address + PMD_SIZE) & PMD_MASK; - if (end > pmd_end) - end = pmd_end; - - do { - if (pte_present(*pte)) { - unsigned long pfn = pte_pfn(*pte); - struct page *page = pfn_to_page(pfn); - - if (pfn_valid(pfn) && !PageReserved(page)) { - count -= try_to_swap_out(mm, vma, address, pte, page, classzone); - if (!count) { - address += PAGE_SIZE; - pte++; - break; - } - } - } - address += PAGE_SIZE; - pte++; - } while (address && (address < end)); - pte_unmap(pte - 1); - mm->swap_address = address; - return count; -} - -/* mm->page_table_lock is held. 
mmap_sem is not held */ -static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone) -{ - pmd_t * pmd; - unsigned long pgd_end; - - if (pgd_none(*dir)) - return count; - if (pgd_bad(*dir)) { - pgd_ERROR(*dir); - pgd_clear(dir); - return count; - } - - pmd = pmd_offset(dir, address); - - pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK; - if (pgd_end && (end > pgd_end)) - end = pgd_end; - - do { - count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone); - if (!count) - break; - address = (address + PMD_SIZE) & PMD_MASK; - pmd++; - } while (address && (address < end)); - return count; -} - -/* mm->page_table_lock is held. mmap_sem is not held */ -static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count, zone_t * classzone) -{ - pgd_t *pgdir; - unsigned long end; - - /* Don't swap out areas which are reserved */ - if (vma->vm_flags & VM_RESERVED) - return count; - - pgdir = pgd_offset(mm, address); - - end = vma->vm_end; - if (address >= end) - BUG(); - do { - count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone); - if (!count) - break; - address = (address + PGDIR_SIZE) & PGDIR_MASK; - pgdir++; - } while (address && (address < end)); - return count; -} - -/* Placeholder for swap_out(): may be updated by fork.c:mmput() */ -struct mm_struct *swap_mm = &init_mm; - -/* - * Returns remaining count of pages to be swapped out by followup call. - */ -static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter, zone_t * classzone) -{ - unsigned long address; - struct vm_area_struct* vma; - - /* - * Find the proper vm-area after freezing the vma chain - * and ptes. 
- */ - spin_lock(&mm->page_table_lock); - address = mm->swap_address; - if (address == TASK_SIZE || swap_mm != mm) { - /* We raced: don't count this mm but try again */ - ++*mmcounter; - goto out_unlock; - } - vma = find_vma(mm, address); - if (vma) { - if (address < vma->vm_start) - address = vma->vm_start; - - for (;;) { - count = swap_out_vma(mm, vma, address, count, classzone); - vma = vma->vm_next; - if (!vma) - break; - if (!count) - goto out_unlock; - address = vma->vm_start; - } - } - /* Indicate that we reached the end of address space */ - mm->swap_address = TASK_SIZE; - -out_unlock: - spin_unlock(&mm->page_table_lock); - return count; -} - -static int FASTCALL(swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone)); -static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone) -{ - int counter, nr_pages = SWAP_CLUSTER_MAX; - struct mm_struct *mm; - - counter = mmlist_nr; - do { - if (need_resched()) { - __set_current_state(TASK_RUNNING); - schedule(); - } - - spin_lock(&mmlist_lock); - mm = swap_mm; - while (mm->swap_address == TASK_SIZE || mm == &init_mm) { - mm->swap_address = 0; - mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist); - if (mm == swap_mm) - goto empty; - swap_mm = mm; - } - - /* Make sure the mm doesn't disappear when we drop the lock.. */ - atomic_inc(&mm->mm_users); - spin_unlock(&mmlist_lock); - - nr_pages = swap_out_mm(mm, nr_pages, &counter, classzone); - - mmput(mm); - - if (!nr_pages) - return 1; - } while (--counter >= 0); + /* File is mmap'd by somebody. 
*/ + if (!list_empty(&mapping->i_mmap) || !list_empty(&mapping->i_mmap_shared)) + return 1; return 0; - -empty: - spin_unlock(&mmlist_lock); - return 0; } static int @@ -392,7 +68,6 @@ { struct list_head * entry; struct address_space *mapping; - int max_mapped = nr_pages << (9 - priority); spin_lock(&pagemap_lru_lock); while (--max_scan >= 0 && @@ -417,6 +92,7 @@ list_del(entry); list_add(entry, &inactive_list); + KERNEL_STAT_INC(pgscan); /* * Zero page counts can happen because we unlink the pages @@ -428,10 +104,6 @@ if (!memclass(page_zone(page), classzone)) continue; - /* Racy check to avoid trylocking when not worthwhile */ - if (!PagePrivate(page) && (page_count(page) != 1 || !page->mapping)) - goto page_mapped; - /* * swap activity never enters the filesystem and is safe * for GFP_NOFS allocations. @@ -461,6 +133,60 @@ continue; } + /* + * The page is in active use or really unfreeable. Move to + * the active list. + */ + pte_chain_lock(page); + if (page_referenced(page) && page_mapping_inuse(page)) { + del_page_from_inactive_list(page); + add_page_to_active_list(page); + pte_chain_unlock(page); + unlock_page(page); + KERNEL_STAT_INC(pgactivate); + continue; + } + + /* + * Anonymous process memory without backing store. Try to + * allocate it some swap space here. + * + * XXX: implement swap clustering ? + */ + if (page->pte.chain && !page->mapping && !PagePrivate(page)) { + page_cache_get(page); + pte_chain_unlock(page); + spin_unlock(&pagemap_lru_lock); + if (!add_to_swap(page)) { + activate_page(page); + unlock_page(page); + page_cache_release(page); + spin_lock(&pagemap_lru_lock); + continue; + } + page_cache_release(page); + spin_lock(&pagemap_lru_lock); + pte_chain_lock(page); + } + + /* + * The page is mapped into the page tables of one or more + * processes. Try to unmap it here. 
+ */ + if (page->pte.chain) { + switch (try_to_unmap(page)) { + case SWAP_ERROR: + case SWAP_FAIL: + goto page_active; + case SWAP_AGAIN: + pte_chain_unlock(page); + unlock_page(page); + continue; + case SWAP_SUCCESS: + ; /* try to free the page below */ + } + } + pte_chain_unlock(page); mapping = page->mapping; if (PageDirty(page) && is_page_cache_freeable(page) && @@ -469,7 +195,7 @@ * It is not critical here to write it only if * the page is unmapped beause any direct writer * like O_DIRECT would set the page's dirty bitflag - * on the phisical page after having successfully + * on the physical page after having successfully * pinned it and after the I/O to the page is finished, * so the direct writes to the page cannot get lost. */ @@ -511,19 +237,11 @@ if (try_to_release_page(page, gfp_mask)) { if (!mapping) { - /* - * We must not allow an anon page - * with no buffers to be visible on - * the LRU, so we unlock the page after - * taking the lru lock - */ - spin_lock(&pagemap_lru_lock); - unlock_page(page); - __lru_cache_del(page); - /* effectively free the page here */ + unlock_page(page); page_cache_release(page); + spin_lock(&pagemap_lru_lock); if (--nr_pages) continue; break; @@ -557,18 +275,7 @@ write_unlock(&mapping->page_lock); } unlock_page(page); -page_mapped: - if (--max_mapped >= 0) - continue; - - /* - * Alert! We've found too many mapped pages on the - * inactive list, so we start swapping out now! - */ - spin_unlock(&pagemap_lru_lock); - swap_out(priority, gfp_mask, classzone); - return nr_pages; - + continue; page_freeable: /* * It is critical to check PageDirty _after_ we made sure @@ -597,13 +304,23 @@ /* effectively free the page here */ page_cache_release(page); - + KERNEL_STAT_INC(pgsteal); if (--nr_pages) continue; - break; + goto out; +page_active: + /* + * OK, we don't know what to do with the page. + * It's no use keeping it here, so we move it to + * the active list. 
+ */ + del_page_from_inactive_list(page); + add_page_to_active_list(page); + pte_chain_unlock(page); + unlock_page(page); + KERNEL_STAT_INC(pgactivate); } - spin_unlock(&pagemap_lru_lock); - +out: spin_unlock(&pagemap_lru_lock); return nr_pages; } @@ -611,8 +328,8 @@ * This moves pages from the active list to * the inactive list. * - * We move them the other way when we see the - * reference bit on the page. + * We move them the other way if the page is + * referenced by one or more processes, from rmap */ static void refill_inactive(int nr_pages) { @@ -625,15 +342,20 @@ page = list_entry(entry, struct page, lru); entry = entry->prev; - if (TestClearPageReferenced(page)) { + + KERNEL_STAT_INC(pgscan); + + pte_chain_lock(page); + if (page->pte.chain && page_referenced(page)) { list_del(&page->lru); list_add(&page->lru, &active_list); + pte_chain_unlock(page); continue; } - del_page_from_active_list(page); add_page_to_inactive_list(page); - SetPageReferenced(page); + pte_chain_unlock(page); + KERNEL_STAT_INC(pgdeactivate); } spin_unlock(&pagemap_lru_lock); } @@ -682,6 +404,8 @@ { int priority = DEF_PRIORITY; int nr_pages = SWAP_CLUSTER_MAX; + + KERNEL_STAT_INC(pageoutrun); do { nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages); diff -Nru a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c --- a/net/sunrpc/clnt.c Sat Jul 20 12:12:35 2002 +++ b/net/sunrpc/clnt.c Sat Jul 20 12:12:35 2002 @@ -577,7 +577,7 @@ if (task->tk_status < 0) return; xprt_transmit(task); - if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc)) { + if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc) && task->tk_status >= 0) { task->tk_action = NULL; rpc_wake_up_task(task); } diff -Nru a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c --- a/net/sunrpc/xprt.c Sat Jul 20 12:12:34 2002 +++ b/net/sunrpc/xprt.c Sat Jul 20 12:12:34 2002 @@ -309,7 +309,7 @@ unsigned long cwnd; cwnd = xprt->cwnd; - if (result >= 0 && xprt->cong <= cwnd) { + if (result >= 0 && cwnd <= xprt->cong) { /* The (cwnd >> 1) term makes sure * the 
result gets rounded properly. */ cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; diff -Nru a/security/Config.help b/security/Config.help --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/security/Config.help Sat Jul 20 12:12:35 2002 @@ -0,0 +1,4 @@ +CONFIG_SECURITY_CAPABILITIES + This enables the "default" Linux capabilities functionality. + If you are unsure how to answer this question, answer Y. + diff -Nru a/security/Config.in b/security/Config.in --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/security/Config.in Sat Jul 20 12:12:35 2002 @@ -0,0 +1,7 @@ +# +# Security configuration +# +mainmenu_option next_comment +comment 'Security options' +define_bool CONFIG_SECURITY_CAPABILITIES y +endmenu diff -Nru a/security/Makefile b/security/Makefile --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/security/Makefile Sat Jul 20 12:12:35 2002 @@ -0,0 +1,13 @@ +# +# Makefile for the kernel security code +# + +# Objects that export symbols +export-objs := security.o + +# Object file lists +obj-y := security.o dummy.o + +obj-$(CONFIG_SECURITY_CAPABILITIES) += capability.o + +include $(TOPDIR)/Rules.make diff -Nru a/security/capability.c b/security/capability.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/security/capability.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,471 @@ +/* + * Capabilities Linux Security Module + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* flag to keep track of how we were registered */ +static int secondary; + +static int cap_capable (struct task_struct *tsk, int cap) +{ + /* Derived from include/linux/sched.h:capable. 
*/ + if (cap_raised (tsk->cap_effective, cap)) + return 0; + else + return -EPERM; +} + +static int cap_sys_security (unsigned int id, unsigned int call, + unsigned long *args) +{ + return -ENOSYS; +} + +static int cap_ptrace (struct task_struct *parent, struct task_struct *child) +{ + /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */ + if (!cap_issubset (child->cap_permitted, current->cap_permitted) && + !capable (CAP_SYS_PTRACE)) + return -EPERM; + else + return 0; +} + +static int cap_capget (struct task_struct *target, kernel_cap_t * effective, + kernel_cap_t * inheritable, kernel_cap_t * permitted) +{ + /* Derived from kernel/capability.c:sys_capget. */ + *effective = cap_t (target->cap_effective); + *inheritable = cap_t (target->cap_inheritable); + *permitted = cap_t (target->cap_permitted); + return 0; +} + +static int cap_capset_check (struct task_struct *target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted) +{ + /* Derived from kernel/capability.c:sys_capset. 
*/ + /* verify restrictions on target's new Inheritable set */ + if (!cap_issubset (*inheritable, + cap_combine (target->cap_inheritable, + current->cap_permitted))) { + return -EPERM; + } + + /* verify restrictions on target's new Permitted set */ + if (!cap_issubset (*permitted, + cap_combine (target->cap_permitted, + current->cap_permitted))) { + return -EPERM; + } + + /* verify the _new_Effective_ is a subset of the _new_Permitted_ */ + if (!cap_issubset (*effective, *permitted)) { + return -EPERM; + } + + return 0; +} + +static void cap_capset_set (struct task_struct *target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted) +{ + target->cap_effective = *effective; + target->cap_inheritable = *inheritable; + target->cap_permitted = *permitted; +} + +static int cap_bprm_alloc_security (struct linux_binprm *bprm) +{ + return 0; +} + +static int cap_bprm_set_security (struct linux_binprm *bprm) +{ + /* Copied from fs/exec.c:prepare_binprm. */ + + /* We don't have VFS support for capabilities yet */ + cap_clear (bprm->cap_inheritable); + cap_clear (bprm->cap_permitted); + cap_clear (bprm->cap_effective); + + /* To support inheritance of root-permissions and suid-root + * executables under compatibility mode, we raise all three + * capability sets for the file. + * + * If only the real uid is 0, we only raise the inheritable + * and permitted sets of the executable file. 
+ */ + + if (!issecure (SECURE_NOROOT)) { + if (bprm->e_uid == 0 || current->uid == 0) { + cap_set_full (bprm->cap_inheritable); + cap_set_full (bprm->cap_permitted); + } + if (bprm->e_uid == 0) + cap_set_full (bprm->cap_effective); + } + return 0; +} + +static int cap_bprm_check_security (struct linux_binprm *bprm) +{ + return 0; +} + +static void cap_bprm_free_security (struct linux_binprm *bprm) +{ + return; +} + +/* Copied from fs/exec.c */ +static inline int must_not_trace_exec (struct task_struct *p) +{ + return (p->ptrace & PT_PTRACED) && !(p->ptrace & PT_PTRACE_CAP); +} + +static void cap_bprm_compute_creds (struct linux_binprm *bprm) +{ + /* Derived from fs/exec.c:compute_creds. */ + kernel_cap_t new_permitted, working; + int do_unlock = 0; + + new_permitted = cap_intersect (bprm->cap_permitted, cap_bset); + working = cap_intersect (bprm->cap_inheritable, + current->cap_inheritable); + new_permitted = cap_combine (new_permitted, working); + + if (!cap_issubset (new_permitted, current->cap_permitted)) { + current->mm->dumpable = 0; + + lock_kernel (); + if (must_not_trace_exec (current) + || atomic_read (¤t->fs->count) > 1 + || atomic_read (¤t->files->count) > 1 + || atomic_read (¤t->sig->count) > 1) { + if (!capable (CAP_SETPCAP)) { + new_permitted = cap_intersect (new_permitted, + current-> + cap_permitted); + } + } + do_unlock = 1; + } + + /* For init, we want to retain the capabilities set + * in the init_task struct. 
Thus we skip the usual + * capability rules */ + if (current->pid != 1) { + current->cap_permitted = new_permitted; + current->cap_effective = + cap_intersect (new_permitted, bprm->cap_effective); + } + + /* AUD: Audit candidate if current->cap_effective is set */ + + if (do_unlock) + unlock_kernel (); + + current->keep_capabilities = 0; +} + +static int cap_task_create (unsigned long clone_flags) +{ + return 0; +} + +static int cap_task_alloc_security (struct task_struct *p) +{ + return 0; +} + +static void cap_task_free_security (struct task_struct *p) +{ + return; +} + +static int cap_task_setuid (uid_t id0, uid_t id1, uid_t id2, int flags) +{ + return 0; +} + +/* moved from kernel/sys.c. */ +/* + * cap_emulate_setxuid() fixes the effective / permitted capabilities of + * a process after a call to setuid, setreuid, or setresuid. + * + * 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of + * {r,e,s}uid != 0, the permitted and effective capabilities are + * cleared. + * + * 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective + * capabilities of the process are cleared. + * + * 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective + * capabilities are set to the permitted capabilities. + * + * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should + * never happen. + * + * -astor + * + * cevans - New behaviour, Oct '99 + * A process may, via prctl(), elect to keep its capabilities when it + * calls setuid() and switches away from uid==0. Both permitted and + * effective sets will be retained. + * Without this change, it was impossible for a daemon to drop only some + * of its privilege. The call to setuid(!=0) would drop all privileges! + * Keeping uid 0 is not an option because uid 0 owns too many vital + * files.. + * Thanks to Olaf Kirch and Peter Benie for spotting this. 
+ */ +static inline void cap_emulate_setxuid (int old_ruid, int old_euid, + int old_suid) +{ + if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) && + (current->uid != 0 && current->euid != 0 && current->suid != 0) && + !current->keep_capabilities) { + cap_clear (current->cap_permitted); + cap_clear (current->cap_effective); + } + if (old_euid == 0 && current->euid != 0) { + cap_clear (current->cap_effective); + } + if (old_euid != 0 && current->euid == 0) { + current->cap_effective = current->cap_permitted; + } +} + +static int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, + int flags) +{ + switch (flags) { + case LSM_SETID_RE: + case LSM_SETID_ID: + case LSM_SETID_RES: + /* Copied from kernel/sys.c:setreuid/setuid/setresuid. */ + if (!issecure (SECURE_NO_SETUID_FIXUP)) { + cap_emulate_setxuid (old_ruid, old_euid, old_suid); + } + break; + case LSM_SETID_FS: + { + uid_t old_fsuid = old_ruid; + + /* Copied from kernel/sys.c:setfsuid. */ + + /* + * FIXME - is fsuser used for all CAP_FS_MASK capabilities? + * if not, we might be a bit too harsh here. 
+ */ + + if (!issecure (SECURE_NO_SETUID_FIXUP)) { + if (old_fsuid == 0 && current->fsuid != 0) { + cap_t (current->cap_effective) &= + ~CAP_FS_MASK; + } + if (old_fsuid != 0 && current->fsuid == 0) { + cap_t (current->cap_effective) |= + (cap_t (current->cap_permitted) & + CAP_FS_MASK); + } + } + break; + } + default: + return -EINVAL; + } + + return 0; +} + +static int cap_task_setgid (gid_t id0, gid_t id1, gid_t id2, int flags) +{ + return 0; +} + +static int cap_task_setpgid (struct task_struct *p, pid_t pgid) +{ + return 0; +} + +static int cap_task_getpgid (struct task_struct *p) +{ + return 0; +} + +static int cap_task_getsid (struct task_struct *p) +{ + return 0; +} + +static int cap_task_setgroups (int gidsetsize, gid_t * grouplist) +{ + return 0; +} + +static int cap_task_setnice (struct task_struct *p, int nice) +{ + return 0; +} + +static int cap_task_setrlimit (unsigned int resource, struct rlimit *new_rlim) +{ + return 0; +} + +static int cap_task_setscheduler (struct task_struct *p, int policy, + struct sched_param *lp) +{ + return 0; +} + +static int cap_task_getscheduler (struct task_struct *p) +{ + return 0; +} + +static int cap_task_wait (struct task_struct *p) +{ + return 0; +} + +static int cap_task_kill (struct task_struct *p, struct siginfo *info, int sig) +{ + return 0; +} + +static int cap_task_prctl (int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5) +{ + return 0; +} + +static void cap_task_kmod_set_label (void) +{ + cap_set_full (current->cap_effective); + return; +} + +static void cap_task_reparent_to_init (struct task_struct *p) +{ + p->cap_effective = CAP_INIT_EFF_SET; + p->cap_inheritable = CAP_INIT_INH_SET; + p->cap_permitted = CAP_FULL_SET; + p->keep_capabilities = 0; + return; +} + +static int cap_register (const char *name, struct security_operations *ops) +{ + return -EINVAL; +} + +static int cap_unregister (const char *name, struct security_operations *ops) +{ + return -EINVAL; +} + 
+static struct security_operations capability_ops = { + ptrace: cap_ptrace, + capget: cap_capget, + capset_check: cap_capset_check, + capset_set: cap_capset_set, + capable: cap_capable, + sys_security: cap_sys_security, + + bprm_alloc_security: cap_bprm_alloc_security, + bprm_free_security: cap_bprm_free_security, + bprm_compute_creds: cap_bprm_compute_creds, + bprm_set_security: cap_bprm_set_security, + bprm_check_security: cap_bprm_check_security, + + task_create: cap_task_create, + task_alloc_security: cap_task_alloc_security, + task_free_security: cap_task_free_security, + task_setuid: cap_task_setuid, + task_post_setuid: cap_task_post_setuid, + task_setgid: cap_task_setgid, + task_setpgid: cap_task_setpgid, + task_getpgid: cap_task_getpgid, + task_getsid: cap_task_getsid, + task_setgroups: cap_task_setgroups, + task_setnice: cap_task_setnice, + task_setrlimit: cap_task_setrlimit, + task_setscheduler: cap_task_setscheduler, + task_getscheduler: cap_task_getscheduler, + task_wait: cap_task_wait, + task_kill: cap_task_kill, + task_prctl: cap_task_prctl, + task_kmod_set_label: cap_task_kmod_set_label, + task_reparent_to_init: cap_task_reparent_to_init, + + register_security: cap_register, + unregister_security: cap_unregister, +}; + +#if defined(CONFIG_SECURITY_CAPABILITIES_MODULE) +#define MY_NAME THIS_MODULE->name +#else +#define MY_NAME "capability" +#endif + +static int __init capability_init (void) +{ + /* register ourselves with the security framework */ + if (register_security (&capability_ops)) { + printk (KERN_INFO + "Failure registering capabilities with the kernel\n"); + /* try registering with primary module */ + if (mod_reg_security (MY_NAME, &capability_ops)) { + printk (KERN_INFO "Failure registering capabilities " + "with primary security module.\n"); + return -EINVAL; + } + secondary = 1; + } + printk (KERN_INFO "Capability LSM initialized\n"); + return 0; +} + +static void __exit capability_exit (void) +{ + /* remove ourselves from the security 
framework */ + if (secondary) { + if (mod_unreg_security (MY_NAME, &capability_ops)) + printk (KERN_INFO "Failure unregistering capabilities " + "with primary module.\n"); + return; + } + + if (unregister_security (&capability_ops)) { + printk (KERN_INFO + "Failure unregistering capabilities with the kernel\n"); + } +} + +module_init (capability_init); +module_exit (capability_exit); + +MODULE_DESCRIPTION("Standard Linux Capabilities Security Module"); +MODULE_LICENSE("GPL"); diff -Nru a/security/dummy.c b/security/dummy.c --- /dev/null Wed Dec 31 16:00:00 1969 +++ b/security/dummy.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,236 @@ +/* + * Stub functions for the default security function pointers in case no + * security model is loaded. + * + * Copyright (C) 2001 WireX Communications, Inc + * Copyright (C) 2001 Greg Kroah-Hartman + * Copyright (C) 2001 Networks Associates Technology, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +static int dummy_ptrace (struct task_struct *parent, struct task_struct *child) +{ + return 0; +} + +static int dummy_capget (struct task_struct *target, kernel_cap_t * effective, + kernel_cap_t * inheritable, kernel_cap_t * permitted) +{ + return 0; +} + +static int dummy_capset_check (struct task_struct *target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted) +{ + return 0; +} + +static void dummy_capset_set (struct task_struct *target, + kernel_cap_t * effective, + kernel_cap_t * inheritable, + kernel_cap_t * permitted) +{ + return; +} + +static int dummy_capable (struct task_struct *tsk, int cap) +{ + if (cap_is_fs_cap (cap) ? 
tsk->fsuid == 0 : tsk->euid == 0) + /* capability granted */ + return 0; + + /* capability denied */ + return -EPERM; +} + +static int dummy_sys_security (unsigned int id, unsigned int call, + unsigned long *args) +{ + return -ENOSYS; +} + +static int dummy_bprm_alloc_security (struct linux_binprm *bprm) +{ + return 0; +} + +static void dummy_bprm_free_security (struct linux_binprm *bprm) +{ + return; +} + +static void dummy_bprm_compute_creds (struct linux_binprm *bprm) +{ + return; +} + +static int dummy_bprm_set_security (struct linux_binprm *bprm) +{ + return 0; +} + +static int dummy_bprm_check_security (struct linux_binprm *bprm) +{ + return 0; +} + +static int dummy_task_create (unsigned long clone_flags) +{ + return 0; +} + +static int dummy_task_alloc_security (struct task_struct *p) +{ + return 0; +} + +static void dummy_task_free_security (struct task_struct *p) +{ + return; +} + +static int dummy_task_setuid (uid_t id0, uid_t id1, uid_t id2, int flags) +{ + return 0; +} + +static int dummy_task_post_setuid (uid_t id0, uid_t id1, uid_t id2, int flags) +{ + return 0; +} + +static int dummy_task_setgid (gid_t id0, gid_t id1, gid_t id2, int flags) +{ + return 0; +} + +static int dummy_task_setpgid (struct task_struct *p, pid_t pgid) +{ + return 0; +} + +static int dummy_task_getpgid (struct task_struct *p) +{ + return 0; +} + +static int dummy_task_getsid (struct task_struct *p) +{ + return 0; +} + +static int dummy_task_setgroups (int gidsetsize, gid_t * grouplist) +{ + return 0; +} + +static int dummy_task_setnice (struct task_struct *p, int nice) +{ + return 0; +} + +static int dummy_task_setrlimit (unsigned int resource, struct rlimit *new_rlim) +{ + return 0; +} + +static int dummy_task_setscheduler (struct task_struct *p, int policy, + struct sched_param *lp) +{ + return 0; +} + +static int dummy_task_getscheduler (struct task_struct *p) +{ + return 0; +} + +static int dummy_task_wait (struct task_struct *p) +{ + return 0; +} + +static int 
dummy_task_kill (struct task_struct *p, struct siginfo *info, + int sig) +{ + return 0; +} + +static int dummy_task_prctl (int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5) +{ + return 0; +} + +static void dummy_task_kmod_set_label (void) +{ + return; +} + +static void dummy_task_reparent_to_init (struct task_struct *p) +{ + p->euid = p->fsuid = 0; + return; +} + +static int dummy_register (const char *name, struct security_operations *ops) +{ + return -EINVAL; +} + +static int dummy_unregister (const char *name, struct security_operations *ops) +{ + return -EINVAL; +} + +struct security_operations dummy_security_ops = { + ptrace: dummy_ptrace, + capget: dummy_capget, + capset_check: dummy_capset_check, + capset_set: dummy_capset_set, + capable: dummy_capable, + sys_security: dummy_sys_security, + + bprm_alloc_security: dummy_bprm_alloc_security, + bprm_free_security: dummy_bprm_free_security, + bprm_compute_creds: dummy_bprm_compute_creds, + bprm_set_security: dummy_bprm_set_security, + bprm_check_security: dummy_bprm_check_security, + + task_create: dummy_task_create, + task_alloc_security: dummy_task_alloc_security, + task_free_security: dummy_task_free_security, + task_setuid: dummy_task_setuid, + task_post_setuid: dummy_task_post_setuid, + task_setgid: dummy_task_setgid, + task_setpgid: dummy_task_setpgid, + task_getpgid: dummy_task_getpgid, + task_getsid: dummy_task_getsid, + task_setgroups: dummy_task_setgroups, + task_setnice: dummy_task_setnice, + task_setrlimit: dummy_task_setrlimit, + task_setscheduler: dummy_task_setscheduler, + task_getscheduler: dummy_task_getscheduler, + task_wait: dummy_task_wait, + task_kill: dummy_task_kill, + task_prctl: dummy_task_prctl, + task_kmod_set_label: dummy_task_kmod_set_label, + task_reparent_to_init: dummy_task_reparent_to_init, + + register_security: dummy_register, + unregister_security: dummy_unregister, +}; + diff -Nru a/security/security.c b/security/security.c --- 
/dev/null Wed Dec 31 16:00:00 1969 +++ b/security/security.c Sat Jul 20 12:12:35 2002 @@ -0,0 +1,249 @@ +/* + * Security plug functions + * + * Copyright (C) 2001 WireX Communications, Inc + * Copyright (C) 2001 Greg Kroah-Hartman + * Copyright (C) 2001 Networks Associates Technology, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#define SECURITY_SCAFFOLD_VERSION "1.0.0" + +extern struct security_operations dummy_security_ops; /* lives in dummy.c */ + +struct security_operations *security_ops; /* Initialized to NULL */ + +/* This macro checks that all pointers in a struct are non-NULL. It + * can be fooled by struct padding for object tile alignment and when + * pointers to data and pointers to functions aren't the same size. + * Yes it's ugly, we'll replace it if it becomes a problem. + */ +#define VERIFY_STRUCT(struct_type, s, e) \ + do { \ + unsigned long * __start = (unsigned long *)(s); \ + unsigned long * __end = __start + \ + sizeof(struct_type)/sizeof(unsigned long *); \ + while (__start != __end) { \ + if (!*__start) { \ + printk(KERN_INFO "%s is missing something\n",\ + #struct_type); \ + e++; \ + break; \ + } \ + __start++; \ + } \ + } while (0) + +static int inline verify (struct security_operations *ops) +{ + int err; + + /* verify the security_operations structure exists */ + if (!ops) { + printk (KERN_INFO "Passed a NULL security_operations " + "pointer, " __FUNCTION__ " failed.\n"); + return -EINVAL; + } + + /* Perform a little sanity checking on our inputs */ + err = 0; + + /* This first check scans the whole security_ops struct for + * missing structs or functions. 
+ * + * (There is no further check now, but will leave as is until + * the lazy registration stuff is done -- JM). + */ + VERIFY_STRUCT(struct security_operations, ops, err); + + if (err) { + printk (KERN_INFO "Not enough functions specified in the " + "security_operation structure, " __FUNCTION__ + " failed.\n"); + return -EINVAL; + } + return 0; +} + +/** + * security_scaffolding_startup - initializes the security scaffolding framework + * + * This should be called early in the kernel initialization sequence. + */ +int security_scaffolding_startup (void) +{ + printk (KERN_INFO "Security Scaffold v" SECURITY_SCAFFOLD_VERSION + " initialized\n"); + + security_ops = &dummy_security_ops; + + return 0; +} + +/** + * register_security - registers a security framework with the kernel + * @ops: a pointer to the struct security_options that is to be registered + * + * This function is to allow a security module to register itself with the + * kernel security subsystem. Some rudimentary checking is done on the @ops + * value passed to this function. A call to unregister_security() should be + * done to remove this security_options structure from the kernel. + * + * If the @ops structure does not contain function pointers for all hooks in + * the structure, or there is already a security module registered with the + * kernel, an error will be returned. Otherwise 0 is returned on success. 
+ */ +int register_security (struct security_operations *ops) +{ + + if (verify (ops)) { + printk (KERN_INFO __FUNCTION__ " could not verify " + "security_operations structure.\n"); + return -EINVAL; + } + if (security_ops != &dummy_security_ops) { + printk (KERN_INFO "There is already a security " + "framework initialized, " __FUNCTION__ " failed.\n"); + return -EINVAL; + } + + security_ops = ops; + + return 0; +} + +/** + * unregister_security - unregisters a security framework with the kernel + * @ops: a pointer to the struct security_options that is to be registered + * + * This function removes a struct security_operations variable that had + * previously been registered with a successful call to register_security(). + * + * If @ops does not match the value previously passed to register_security() + * an error is returned. Otherwise the default security options is set to + * the dummy_security_ops structure, and 0 is returned. + */ +int unregister_security (struct security_operations *ops) +{ + if (ops != security_ops) { + printk (KERN_INFO __FUNCTION__ ": trying to unregister " + "a security_opts structure that is not " + "registered, failing.\n"); + return -EINVAL; + } + + security_ops = &dummy_security_ops; + + return 0; +} + +/** + * mod_reg_security - allows security modules to be "stacked" + * @name: a pointer to a string with the name of the security_options to be registered + * @ops: a pointer to the struct security_options that is to be registered + * + * This function allows security modules to be stacked if the currently loaded + * security module allows this to happen. It passes the @name and @ops to the + * register_security function of the currently loaded security module. + * + * The return value depends on the currently loaded security module, with 0 as + * success. 
+ */ +int mod_reg_security (const char *name, struct security_operations *ops) +{ + if (verify (ops)) { + printk (KERN_INFO __FUNCTION__ " could not verify " + "security operations.\n"); + return -EINVAL; + } + + if (ops == security_ops) { + printk (KERN_INFO __FUNCTION__ " security operations " + "already registered.\n"); + return -EINVAL; + } + + return security_ops->register_security (name, ops); +} + +/** + * mod_unreg_security - allows a security module registered with mod_reg_security() to be unloaded + * @name: a pointer to a string with the name of the security_options to be removed + * @ops: a pointer to the struct security_options that is to be removed + * + * This function allows security modules that have been successfully registered + * with a call to mod_reg_security() to be unloaded from the system. + * This calls the currently loaded security module's unregister_security() call + * with the @name and @ops variables. + * + * The return value depends on the currently loaded security module, with 0 as + * success. + */ +int mod_unreg_security (const char *name, struct security_operations *ops) +{ + if (ops == security_ops) { + printk (KERN_INFO __FUNCTION__ " invalid attempt to unregister " + " primary security ops.\n"); + return -EINVAL; + } + + return security_ops->unregister_security (name, ops); +} + +/** + * capable - calls the currently loaded security module's capable() function with the specified capability + * @cap: the requested capability level. + * + * This function calls the currently loaded security module's capable() + * function with a pointer to the current task and the specified @cap value. + * + * This allows the security module to implement the capable function call + * however it chooses to. 
+ */ +int capable (int cap) +{ + if (security_ops->capable (current, cap)) { + /* capability denied */ + return 0; + } + + /* capability granted */ + current->flags |= PF_SUPERPRIV; + return 1; +} + +/** + * sys_security - security syscall multiplexor. + * @id: module id + * @call: call identifier + * @args: arg list for call + * + * Similar to sys_socketcall. Can use id to help identify which module user + * app is talking to. The recommended convention for creating the + * hexadecimal id value is: + * 'echo "Name_of_module" | md5sum | cut -c -8'. + * By following this convention, there's no need for a central registry. + */ +asmlinkage long sys_security (unsigned int id, unsigned int call, + unsigned long *args) +{ + return security_ops->sys_security (id, call, args); +} + +EXPORT_SYMBOL (register_security); +EXPORT_SYMBOL (unregister_security); +EXPORT_SYMBOL (mod_reg_security); +EXPORT_SYMBOL (mod_unreg_security); +EXPORT_SYMBOL (capable); +EXPORT_SYMBOL (security_ops);